Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
42 commits
Select commit Hold shift + click to select a range
8c094ae
Merge pull request #830 from nearai/staging-promote/3a2989d0-22888378864
henrypark133 Mar 10, 2026
a677b20
chore: promote staging to main (2026-03-10 15:19 UTC) (#865)
ironclaw-ci[bot] Mar 11, 2026
6116c88
merge: resolve main into staging-promote (ChannelSecretUpdater import)
henrypark133 Mar 11, 2026
7a9396f
Merge pull request #904 from nearai/staging-promote/3a841b30-22928320566
henrypark133 Mar 11, 2026
6aae1f8
Merge pull request #907 from nearai/staging-promote/b0214fef-22930316561
henrypark133 Mar 11, 2026
6a1301b
feat(i18n): Add internationalization support with Chinese and English…
ironclaw-ci[bot] Mar 11, 2026
7e8c0fb
chore: release v0.18.0 (#885)
github-actions[bot] Mar 11, 2026
edca67e
Merge pull request #912 from nearai/staging-promote/55b5a462-22934480277
henrypark133 Mar 11, 2026
8391415
chore: update WASM artifact SHA256 checksums [skip ci] (#954)
github-actions[bot] Mar 11, 2026
ffbc0cd
Merge pull request #962 from nearai/staging-promote/d313f44a-22974575035
henrypark133 Mar 11, 2026
696d6a0
Merge pull request #957 from nearai/staging-promote/34550add-22970193833
henrypark133 Mar 11, 2026
99dadcb
Merge pull request #925 from nearai/staging-promote/8f513428-22941325130
henrypark133 Mar 11, 2026
d7024f5
Merge pull request #917 from nearai/staging-promote/369741fc-22935740447
henrypark133 Mar 11, 2026
8c2131d
feat(embeddings): add EMBEDDING_BASE_URL for OpenAI-compatible embedd…
smkrv Mar 12, 2026
a71a503
Merge pull request #1065 from nearai/staging-promote/f776d963-2301719…
henrypark133 Mar 12, 2026
3149c91
Merge pull request #1102 from nearai/staging-promote/1e00b1fe-2303636…
henrypark133 Mar 13, 2026
2b8063a
Merge pull request #1096 from nearai/staging-promote/3c619b62-2303503…
henrypark133 Mar 13, 2026
a3c99f2
Merge branch 'main' into staging-promote/e2eb340c-22999151534
henrypark133 Mar 13, 2026
ca6d9f6
fix(registry): bump versions for github, web-search, and discord exte…
henrypark133 Mar 13, 2026
f470f5d
Merge pull request #1032 from nearai/staging-promote/e2eb340c-2299915…
henrypark133 Mar 13, 2026
3debe41
Merge pull request #1149 from nearai/staging-promote/2b625ef3-2306847…
henrypark133 Mar 13, 2026
c4e098d
Fix subagent monitor events being treated as user input (#1173)
pikaxinge Mar 15, 2026
877f117
feat(transcription): add Chat Completions API provider for audio tran…
smkrv Mar 16, 2026
0245c0f
feat(orchestrator): read ORCHESTRATOR_PORT env var for configurable A…
smkrv Mar 16, 2026
de214c2
feat: add LLM_CHEAP_MODEL for generic smart routing across all backen…
smkrv Mar 16, 2026
ccdce69
Merge pull request #1185 from nearai/staging-promote/71b1a677-2309634…
henrypark133 Mar 16, 2026
aa3fac3
Merge pull request #1182 from nearai/staging-promote/579c4fdb-2309533…
henrypark133 Mar 16, 2026
190c70c
Merge pull request #1176 from nearai/staging-promote/17706632-2309443…
henrypark133 Mar 16, 2026
4277a5a
Merge pull request #1159 from nearai/staging-promote/f9b880c2-2308045…
henrypark133 Mar 16, 2026
d1c1bc7
Merge pull request #1145 from nearai/staging-promote/7d745d54-2306660…
henrypark133 Mar 16, 2026
a580c1d
Merge pull request #1137 from nearai/staging-promote/f53c1bb1-2306425…
henrypark133 Mar 16, 2026
4c7afdb
Merge pull request #1134 from nearai/staging-promote/bc672520-2306208…
henrypark133 Mar 16, 2026
9aca6a1
Merge pull request #1186 from nearai/staging-promote/8753c482-2309831…
henrypark133 Mar 16, 2026
b8ddbea
Merge pull request #1188 from nearai/staging-promote/c79754df-2309942…
henrypark133 Mar 16, 2026
4890e73
Merge pull request #1132 from nearai/staging-promote/e805ec61-2305963…
henrypark133 Mar 16, 2026
218e877
Merge pull request #1192 from nearai/staging-promote/15ab156d-2310355…
henrypark133 Mar 16, 2026
f2587e1
Merge pull request #1193 from nearai/staging-promote/97b11ffd-2310419…
henrypark133 Mar 16, 2026
ea0fa7c
Merge pull request #1196 from nearai/staging-promote/e74214dc-2310485…
henrypark133 Mar 16, 2026
b50eddf
Merge branch 'main' into fix/resolve-conflicts
nickpismenkov Mar 16, 2026
fc18064
fix: resolve merge conflict fallout and missing config fields
nickpismenkov Mar 16, 2026
e7ddd46
Merge pull request #1262 from nearai/fix/resolve-conflicts
henrypark133 Mar 16, 2026
026beb0
fix: cover staging CI all-features and routine batch regressions (#1256)
henrypark133 Mar 16, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 6 additions & 2 deletions .github/workflows/e2e.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ on:
- cron: "0 6 * * 1" # Weekly Monday 6 AM UTC
workflow_dispatch:
pull_request:
branches:
- main
paths:
- "src/channels/web/**"
- "tests/e2e/**"
Expand Down Expand Up @@ -50,9 +52,11 @@ jobs:
- group: core
files: "tests/e2e/scenarios/test_connection.py tests/e2e/scenarios/test_chat.py tests/e2e/scenarios/test_sse_reconnect.py tests/e2e/scenarios/test_html_injection.py tests/e2e/scenarios/test_csp.py"
- group: features
files: "tests/e2e/scenarios/test_skills.py tests/e2e/scenarios/test_tool_approval.py"
files: "tests/e2e/scenarios/test_skills.py tests/e2e/scenarios/test_tool_approval.py tests/e2e/scenarios/test_webhook.py"
- group: extensions
files: "tests/e2e/scenarios/test_extensions.py tests/e2e/scenarios/test_extension_oauth.py tests/e2e/scenarios/test_telegram_token_validation.py tests/e2e/scenarios/test_wasm_lifecycle.py tests/e2e/scenarios/test_tool_execution.py tests/e2e/scenarios/test_pairing.py tests/e2e/scenarios/test_oauth_credential_fallback.py tests/e2e/scenarios/test_routine_oauth_credential_injection.py"
files: "tests/e2e/scenarios/test_extensions.py tests/e2e/scenarios/test_extension_oauth.py tests/e2e/scenarios/test_telegram_token_validation.py tests/e2e/scenarios/test_telegram_hot_activation.py tests/e2e/scenarios/test_wasm_lifecycle.py tests/e2e/scenarios/test_tool_execution.py tests/e2e/scenarios/test_pairing.py tests/e2e/scenarios/test_mcp_auth_flow.py tests/e2e/scenarios/test_oauth_credential_fallback.py tests/e2e/scenarios/test_routine_oauth_credential_injection.py"
- group: routines
files: "tests/e2e/scenarios/test_owner_scope.py tests/e2e/scenarios/test_routine_event_batch.py"
steps:
- uses: actions/checkout@v6

Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ jobs:
matrix:
include:
- name: all-features
flags: "--features postgres,libsql,html-to-markdown"
flags: "--all-features"
- name: default
flags: ""
- name: libsql-only
Expand Down
2 changes: 1 addition & 1 deletion registry/channels/discord.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
"name": "discord",
"display_name": "Discord Channel",
"kind": "channel",
"version": "0.2.0",
"version": "0.2.1",
"wit_version": "0.3.0",
"description": "Talk to your agent in Discord",
"keywords": [
Expand Down
2 changes: 1 addition & 1 deletion registry/tools/github.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
"name": "github",
"display_name": "GitHub",
"kind": "tool",
"version": "0.2.0",
"version": "0.2.1",
"wit_version": "0.3.0",
"description": "GitHub integration for issues, PRs, repos, and code search",
"keywords": [
Expand Down
2 changes: 1 addition & 1 deletion registry/tools/web-search.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
"name": "web-search",
"display_name": "Web Search",
"kind": "tool",
"version": "0.2.0",
"version": "0.2.1",
"wit_version": "0.3.0",
"description": "Search the web using Brave Search API",
"keywords": [
Expand Down
1 change: 1 addition & 0 deletions src/agent/dispatcher.rs
Original file line number Diff line number Diff line change
Expand Up @@ -148,6 +148,7 @@ impl Agent {
"notify_channel": message.channel,
"notify_user": message.user_id,
"notify_thread_id": message.thread_id,
"notify_metadata": message.metadata,
});

// Build system prompts once for this turn. Two variants: with tools
Expand Down
12 changes: 12 additions & 0 deletions src/config/llm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,8 @@ impl LlmConfig {
provider: None,
bedrock: None,
request_timeout_secs: 120,
cheap_model: None,
smart_routing_cascade: false,
}
}

Expand Down Expand Up @@ -168,6 +170,14 @@ impl LlmConfig {

let request_timeout_secs = parse_optional_env("LLM_REQUEST_TIMEOUT_SECS", 120)?;

// Generic cheap model (works with any backend).
// Falls back to NearAI-specific cheap_model in provider chain logic.
let cheap_model = optional_env("LLM_CHEAP_MODEL")?;

// Generic smart routing cascade flag.
// Defaults to true. Overrides NearAI-specific smart_routing_cascade.
let smart_routing_cascade = parse_optional_env("SMART_ROUTING_CASCADE", true)?;

Ok(Self {
backend: if is_nearai {
"nearai".to_string()
Expand All @@ -183,6 +193,8 @@ impl LlmConfig {
provider,
bedrock,
request_timeout_secs,
cheap_model,
smart_routing_cascade,
})
}

Expand Down
77 changes: 64 additions & 13 deletions src/config/transcription.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,15 @@ use crate::settings::Settings;
pub struct TranscriptionConfig {
/// Whether audio transcription is enabled.
pub enabled: bool,
/// Provider: "openai" (default).
/// Provider: "openai" (default) or "chat_completions".
pub provider: String,
/// OpenAI API key (reuses OPENAI_API_KEY).
pub openai_api_key: Option<SecretString>,
/// Model to use (default: "whisper-1").
/// Explicit transcription API key (overrides provider-specific keys).
pub api_key: Option<SecretString>,
/// LLM API key (reuses LLM_API_KEY, used as fallback for chat_completions).
pub llm_api_key: Option<SecretString>,
/// Model to use (default depends on provider).
pub model: String,
/// Base URL override for the transcription API.
pub base_url: Option<String>,
Expand All @@ -25,6 +29,8 @@ impl Default for TranscriptionConfig {
enabled: false,
provider: "openai".to_string(),
openai_api_key: None,
api_key: None,
llm_api_key: None,
model: "whisper-1".to_string(),
base_url: None,
}
Expand All @@ -42,38 +48,83 @@ impl TranscriptionConfig {
optional_env("TRANSCRIPTION_PROVIDER")?.unwrap_or_else(|| "openai".to_string());

let openai_api_key = optional_env("OPENAI_API_KEY")?.map(SecretString::from);
let api_key = optional_env("TRANSCRIPTION_API_KEY")?.map(SecretString::from);
let llm_api_key = optional_env("LLM_API_KEY")?.map(SecretString::from);

let model = optional_env("TRANSCRIPTION_MODEL")?.unwrap_or_else(|| "whisper-1".to_string());
let default_model = match provider.as_str() {
"chat_completions" => "google/gemini-2.0-flash-001",
_ => "whisper-1",
};
let model =
optional_env("TRANSCRIPTION_MODEL")?.unwrap_or_else(|| default_model.to_string());

let base_url = optional_env("TRANSCRIPTION_BASE_URL")?;

Ok(Self {
enabled,
provider,
openai_api_key,
api_key,
llm_api_key,
model,
base_url,
})
}

/// Resolve the API key for the configured provider.
///
/// Priority: `TRANSCRIPTION_API_KEY` > provider-specific key.
fn resolve_api_key(&self) -> Option<&SecretString> {
    // An explicitly configured transcription key always wins.
    if let Some(explicit) = self.api_key.as_ref() {
        return Some(explicit);
    }
    // Otherwise fall back to whichever key suits the provider.
    if self.provider == "chat_completions" {
        // Chat Completions reuses the LLM key first, then the OpenAI key.
        self.llm_api_key.as_ref().or(self.openai_api_key.as_ref())
    } else {
        // Default (OpenAI Whisper) path uses the OpenAI key only.
        self.openai_api_key.as_ref()
    }
}

/// Create the transcription provider if enabled and configured.
pub fn create_provider(&self) -> Option<Box<dyn crate::transcription::TranscriptionProvider>> {
if !self.enabled {
return None;
}

// Currently only OpenAI Whisper is supported; more providers can be
// added here with a match on self.provider.
let api_key = self.openai_api_key.as_ref()?;
tracing::info!(model = %self.model, "Audio transcription enabled via OpenAI Whisper");
let api_key = self.resolve_api_key()?;

let mut provider = crate::transcription::OpenAiWhisperProvider::new(api_key.clone())
.with_model(&self.model);
match self.provider.as_str() {
"chat_completions" => {
tracing::info!(
model = %self.model,
"Audio transcription enabled via Chat Completions API"
);

if let Some(ref base_url) = self.base_url {
provider = provider.with_base_url(base_url);
}
let mut provider = crate::transcription::ChatCompletionsTranscriptionProvider::new(
api_key.clone(),
)
.with_model(&self.model);

if let Some(ref base_url) = self.base_url {
provider = provider.with_base_url(base_url);
}

Some(Box::new(provider))
Some(Box::new(provider))
}
_ => {
tracing::info!(
model = %self.model,
"Audio transcription enabled via OpenAI Whisper"
);

let mut provider =
crate::transcription::OpenAiWhisperProvider::new(api_key.clone())
.with_model(&self.model);

if let Some(ref base_url) = self.base_url {
provider = provider.with_base_url(base_url);
}

Some(Box::new(provider))
}
}
}
}
36 changes: 27 additions & 9 deletions src/db/tls.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,22 @@
//! certificates — the same TLS stack that `reqwest` already uses for HTTP.

use deadpool_postgres::{Pool, Runtime};
use thiserror::Error;
use tokio_postgres::NoTls;
use tokio_postgres_rustls::MakeRustlsConnect;

use crate::config::SslMode;

/// Errors that can occur while building a Postgres connection pool.
#[derive(Debug, Error)]
pub enum CreatePoolError {
/// The underlying deadpool pool construction failed.
#[error("{0}")]
Pool(#[from] deadpool_postgres::CreatePoolError),
/// Building the rustls TLS client configuration failed.
#[error("postgres TLS configuration failed: {0}")]
TlsConfig(#[from] rustls::Error),
}

/// Build a rustls-based TLS connector using the platform's root certificate store.
fn make_rustls_connector() -> MakeRustlsConnect {
fn make_rustls_connector() -> Result<MakeRustlsConnect, rustls::Error> {
let mut root_store = rustls::RootCertStore::empty();
let native = rustls_native_certs::load_native_certs();
for e in &native.errors {
Expand All @@ -25,10 +34,15 @@ fn make_rustls_connector() -> MakeRustlsConnect {
if root_store.is_empty() {
tracing::error!("no system root certificates found -- TLS connections will fail");
}
let config = rustls::ClientConfig::builder()
.with_root_certificates(root_store)
.with_no_client_auth();
MakeRustlsConnect::new(config)
// `--all-features` brings in both aws-lc-rs and ring-backed rustls providers.
// Pick the same ring provider reqwest already uses so postgres TLS setup stays deterministic.
let config = rustls::ClientConfig::builder_with_provider(
rustls::crypto::ring::default_provider().into(),
)
.with_safe_default_protocol_versions()?
.with_root_certificates(root_store)
.with_no_client_auth();
Ok(MakeRustlsConnect::new(config))
}

/// Create a [`deadpool_postgres::Pool`] with the appropriate TLS connector.
Expand All @@ -45,12 +59,16 @@ fn make_rustls_connector() -> MakeRustlsConnect {
pub fn create_pool(
config: &deadpool_postgres::Config,
ssl_mode: SslMode,
) -> Result<Pool, deadpool_postgres::CreatePoolError> {
) -> Result<Pool, CreatePoolError> {
match ssl_mode {
SslMode::Disable => config.create_pool(Some(Runtime::Tokio1), NoTls),
SslMode::Disable => config
.create_pool(Some(Runtime::Tokio1), NoTls)
.map_err(CreatePoolError::from),
SslMode::Prefer | SslMode::Require => {
let tls = make_rustls_connector();
config.create_pool(Some(Runtime::Tokio1), tls)
let tls = make_rustls_connector()?;
config
.create_pool(Some(Runtime::Tokio1), tls)
.map_err(CreatePoolError::from)
}
}
}
Expand Down
24 changes: 24 additions & 0 deletions src/llm/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,30 @@ pub struct LlmConfig {
/// Default: 120. Increase for local LLMs (Ollama, vLLM, LM Studio) that
/// need more time for prompt evaluation on consumer hardware.
pub request_timeout_secs: u64,
/// Generic cheap/fast model for lightweight tasks (heartbeat, routing, evaluation).
/// Works with any backend. Set via `LLM_CHEAP_MODEL` env var.
/// When set, takes priority over the NearAI-specific `NEARAI_CHEAP_MODEL`.
pub cheap_model: Option<String>,
/// Enable cascade mode for smart routing (retry with primary if cheap model
/// response seems uncertain). Default: true. Set via `SMART_ROUTING_CASCADE`.
pub smart_routing_cascade: bool,
}

impl LlmConfig {
    /// Resolve the effective cheap model name.
    ///
    /// Resolution order:
    /// 1. `LLM_CHEAP_MODEL` (generic, works with any backend)
    /// 2. `NEARAI_CHEAP_MODEL` (NearAI-only, backward compatibility)
    pub fn cheap_model_name(&self) -> Option<&str> {
        // The generic override takes priority regardless of backend.
        if let Some(generic) = self.cheap_model.as_deref() {
            return Some(generic);
        }
        // Only the NearAI backend carries a legacy backend-specific setting.
        match self.backend.as_str() {
            "nearai" => self.nearai.cheap_model.as_deref(),
            _ => None,
        }
    }
}

/// NEAR AI configuration.
Expand Down
Loading
Loading