From ddfa610f7b6502749cf8c8a4d28799919587c653 Mon Sep 17 00:00:00 2001
From: Utkarsh
Date: Wed, 12 Nov 2025 23:34:23 +0530
Subject: [PATCH 1/2] docs: document withRetry utility in services layer

---
 apps/whispering/src/lib/services/README.md | 40 +++++++++++++++++++++++---
 1 file changed, 37 insertions(+), 3 deletions(-)

diff --git a/apps/whispering/src/lib/services/README.md b/apps/whispering/src/lib/services/README.md
index 19f7227590..b80f937028 100644
--- a/apps/whispering/src/lib/services/README.md
+++ b/apps/whispering/src/lib/services/README.md
@@ -121,6 +121,42 @@ async function transcribe(
 }
 ```
 
+## 🧩 Utility Wrappers
+
+### `withRetry`: Retry + Timeout for External Calls
+
+`withRetry` is a shared utility for wrapping async operations that may fail transiently (e.g., network hiccups, rate limits). It adds retry logic and timeout protection to external API calls so transient failures are handled the same way in every service.
+
+Usage:
+
+```typescript
+import { withRetry } from '$lib/services/completion/utils/withRetry';
+
+const result = await withRetry(() => apiCall(), {
+  retries: 2,
+  delayMs: 1000,
+  timeoutMs: 8000,
+});
+```
+
+Options:
+
+- `retries`: number of retry attempts after the initial call (default: 3)
+- `delayMs`: delay between retries in milliseconds (default: 500)
+- `timeoutMs`: maximum time to wait for each attempt before treating it as failed (default: 10000)
+
+The transcription services currently override these defaults with `retries: 2`, `delayMs: 1000`, and `timeoutMs: 8000`.
+
+Used in:
+
+- `openai.ts`
+- `groq.ts`
+- `deepgram.ts`
+- `elevenlabs.ts`
+- `mistral.ts`
+
+This utility keeps retry behavior consistent across all transcription services without duplicating logic. It is pure, testable, and platform-agnostic.
+
 ## Service-Specific Error Types
 
 Each service defines its own `TaggedError` type to represent domain-specific failures. These error types are part of the service's public API and contain all the context needed to understand what went wrong:
@@ -222,9 +258,7 @@ export function createManualRecorderService() {
     startRecording: async (
       recordingSettings,
       { sendStatus },
-    ): Promise<
-      Result
-    > => {
+    ): Promise<Result> => {
       if (activeRecording) {
         return Err({
           name: 'RecorderServiceError',

From ed19108e689982e12ed93bec28321cd01d7438c5 Mon Sep 17 00:00:00 2001
From: Utkarsh
Date: Wed, 12 Nov 2025 23:35:44 +0530
Subject: [PATCH 2/2] feat: add withRetry utility and wrap all transcription
 services

---
 apps/demo-mcp/.data/reddit.db                 |  0
 .../services/completion/utils/withRetry.ts    | 28 ++++++++++++++
 .../services/transcription/cloud/deepgram.ts  | 28 ++++++++-----
 .../transcription/cloud/elevenlabs.ts         | 30 +++++++++-----
 .../lib/services/transcription/cloud/groq.ts  | 39 +++++++++++-------
 .../services/transcription/cloud/mistral.ts   | 35 ++++++++++------
 .../services/transcription/cloud/openai.ts    | 41 +++++++++++--------
 apps/whispering/svelte.config.js              | 15 +++----
 bun.lock                                      |  6 +--
 9 files changed, 143 insertions(+), 79 deletions(-)
 create mode 100644 apps/demo-mcp/.data/reddit.db
 create mode 100644 apps/whispering/src/lib/services/completion/utils/withRetry.ts

diff --git a/apps/demo-mcp/.data/reddit.db b/apps/demo-mcp/.data/reddit.db
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/apps/whispering/src/lib/services/completion/utils/withRetry.ts b/apps/whispering/src/lib/services/completion/utils/withRetry.ts
new file mode 100644
index 0000000000..7fdda968a3
--- /dev/null
+++ b/apps/whispering/src/lib/services/completion/utils/withRetry.ts
@@ -0,0 +1,28 @@
+export async function withRetry<T>(
+  fn: () => Promise<T>,
+  options: {
+    retries?: number;
+    delayMs?: number;
+    timeoutMs?: number;
+  },
+): Promise<T> {
+  const { retries = 3, delayMs = 500, timeoutMs = 10000 } = options;
+
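+  // Race each attempt against a timeout. Note that the timeout only rejects
+  // this race; it does not abort the in-flight call, which may still settle later.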
+  for (let attempt = 0; attempt <= retries; attempt++) {
+    try {
+      const result = await Promise.race([
+        fn(),
+        new Promise<never>((_, reject) =>
+          setTimeout(() => reject(new Error('Timeout')), timeoutMs),
+        ),
+      ]);
+      return result;
+    } catch (error) {
+      if (attempt === retries) throw error;
+      await new Promise((resolve) => setTimeout(resolve, delayMs));
+    }
+  }
+  throw new Error('Retry failed');
+}
diff --git a/apps/whispering/src/lib/services/transcription/cloud/deepgram.ts b/apps/whispering/src/lib/services/transcription/cloud/deepgram.ts
index b633901335..e9ca4a5a6f 100644
--- a/apps/whispering/src/lib/services/transcription/cloud/deepgram.ts
+++ b/apps/whispering/src/lib/services/transcription/cloud/deepgram.ts
@@ -1,6 +1,7 @@
 import { Ok, type Result } from 'wellcrafted/result';
 import { z } from 'zod';
 import { WhisperingErr, type WhisperingError } from '$lib/result';
+import { withRetry } from '$lib/services/completion/utils/withRetry';
 import type { HttpService } from '$lib/services/http';
 import { HttpServiceLive } from '$lib/services/http';
 import type { Settings } from '$lib/settings';
@@ -118,16 +119,23 @@ export function createDeepgramTranscriptionService({
       }
 
       // Send raw audio data directly as recommended by Deepgram docs
-      const { data: deepgramResponse, error: postError } =
-        await HttpService.post({
-          url: `https://api.deepgram.com/v1/listen?${params.toString()}`,
-          body: audioBlob, // Send raw audio blob directly
-          headers: {
-            Authorization: `Token ${options.apiKey}`,
-            'Content-Type': audioBlob.type || 'audio/*', // Use the blob's mime type or fallback to audio/*
-          },
-          schema: deepgramResponseSchema,
-        });
+      const { data: deepgramResponse, error: postError } = await withRetry(
+        () =>
+          HttpService.post({
+            url: `https://api.deepgram.com/v1/listen?${params.toString()}`,
+            body: audioBlob,
+            headers: {
+              Authorization: `Token ${options.apiKey}`,
+              'Content-Type': audioBlob.type || 'audio/*',
+            },
+            schema: deepgramResponseSchema,
+          }),
+        {
+          retries: 2,
+          delayMs: 1000,
+          timeoutMs: 8000,
+        },
+      );
 
       if (postError) {
         switch (postError.name) {
diff --git a/apps/whispering/src/lib/services/transcription/cloud/elevenlabs.ts b/apps/whispering/src/lib/services/transcription/cloud/elevenlabs.ts
index 97421b47f0..43d1452b9c 100644
--- a/apps/whispering/src/lib/services/transcription/cloud/elevenlabs.ts
+++ b/apps/whispering/src/lib/services/transcription/cloud/elevenlabs.ts
@@ -1,6 +1,7 @@
 import { ElevenLabsClient } from 'elevenlabs';
 import { Ok, type Result } from 'wellcrafted/result';
 import { WhisperingErr, type WhisperingError } from '$lib/result';
+import { withRetry } from '$lib/services/completion/utils/withRetry';
 import type { Settings } from '$lib/settings';
 
 export const ELEVENLABS_TRANSCRIPTION_MODELS = [
@@ -62,17 +63,24 @@ export function createElevenLabsTranscriptionService() {
       }
 
       // Use the client's speechToText functionality
-      const transcription = await client.speechToText.convert({
-        file: audioBlob,
-        model_id: options.modelName,
-        // Map outputLanguage if not set to 'auto'
-        language_code:
-          options.outputLanguage !== 'auto'
-            ? options.outputLanguage
-            : undefined,
-        tag_audio_events: false,
-        diarize: true,
-      });
+      const transcription = await withRetry(
+        () =>
+          client.speechToText.convert({
+            file: audioBlob,
+            model_id: options.modelName,
+            language_code:
+              options.outputLanguage !== 'auto'
+                ? options.outputLanguage
+                : undefined,
+            tag_audio_events: false,
+            diarize: true,
+          }),
+        {
+          retries: 2,
+          delayMs: 1000,
+          timeoutMs: 8000,
+        },
+      );
 
       // Return the transcribed text
       return Ok(transcription.text.trim());
diff --git a/apps/whispering/src/lib/services/transcription/cloud/groq.ts b/apps/whispering/src/lib/services/transcription/cloud/groq.ts
index af28382247..445c7585a5 100644
--- a/apps/whispering/src/lib/services/transcription/cloud/groq.ts
+++ b/apps/whispering/src/lib/services/transcription/cloud/groq.ts
@@ -2,6 +2,7 @@ import Groq from 'groq-sdk';
 import { Err, Ok, type Result, tryAsync, trySync } from 'wellcrafted/result';
 import { WhisperingErr, type WhisperingError } from '$lib/result';
 import { getExtensionFromAudioBlob } from '$lib/services/_utils';
+import { withRetry } from '$lib/services/completion/utils/withRetry';
 import type { Settings } from '$lib/settings';
 
 export const GROQ_MODELS = [
@@ -95,21 +96,29 @@ export function createGroqTranscriptionService() {
       // Make the transcription request
       const { data: transcription, error: groqApiError } = await tryAsync({
         try: () =>
-          new Groq({
-            apiKey: options.apiKey,
-            dangerouslyAllowBrowser: true,
-          }).audio.transcriptions.create({
-            file,
-            model: options.modelName,
-            language:
-              options.outputLanguage === 'auto'
-                ? undefined
-                : options.outputLanguage,
-            prompt: options.prompt ? options.prompt : undefined,
-            temperature: options.temperature
-              ? Number.parseFloat(options.temperature)
-              : undefined,
-          }),
+          withRetry(
+            () =>
+              new Groq({
+                apiKey: options.apiKey,
+                dangerouslyAllowBrowser: true,
+              }).audio.transcriptions.create({
+                file,
+                model: options.modelName,
+                language:
+                  options.outputLanguage === 'auto'
+                    ? undefined
+                    : options.outputLanguage,
+                prompt: options.prompt || undefined,
+                temperature: options.temperature
+                  ? Number.parseFloat(options.temperature)
+                  : undefined,
+              }),
+            {
+              retries: 2,
+              delayMs: 1000,
+              timeoutMs: 8000,
+            },
+          ),
         catch: (error) => {
           // Check if it's NOT a Groq API error
           if (!(error instanceof Groq.APIError)) {
diff --git a/apps/whispering/src/lib/services/transcription/cloud/mistral.ts b/apps/whispering/src/lib/services/transcription/cloud/mistral.ts
index c40e4396b9..86abe8825a 100644
--- a/apps/whispering/src/lib/services/transcription/cloud/mistral.ts
+++ b/apps/whispering/src/lib/services/transcription/cloud/mistral.ts
@@ -2,6 +2,7 @@ import { Mistral } from '@mistralai/mistralai';
 import { Err, Ok, type Result, tryAsync, trySync } from 'wellcrafted/result';
 import { WhisperingErr, type WhisperingError } from '$lib/result';
 import { getExtensionFromAudioBlob } from '$lib/services/_utils';
+import { withRetry } from '$lib/services/completion/utils/withRetry';
 import type { Settings } from '$lib/settings';
 export const MISTRAL_TRANSCRIPTION_MODELS = [
   {
@@ -78,19 +79,27 @@ export function createMistralTranscriptionService() {
       // Make the transcription request
       const { data: transcription, error: mistralApiError } = await tryAsync({
         try: () =>
-          new Mistral({
-            apiKey: options.apiKey,
-          }).audio.transcriptions.complete({
-            file,
-            model: options.modelName,
-            language:
-              options.outputLanguage !== 'auto'
-                ? options.outputLanguage
-                : undefined,
-            temperature: options.temperature
-              ? Number.parseFloat(options.temperature)
-              : undefined,
-          }),
+          withRetry(
+            () =>
+              new Mistral({
+                apiKey: options.apiKey,
+              }).audio.transcriptions.complete({
+                file,
+                model: options.modelName,
+                language:
+                  options.outputLanguage !== 'auto'
+                    ? options.outputLanguage
+                    : undefined,
+                temperature: options.temperature
+                  ? Number.parseFloat(options.temperature)
+                  : undefined,
+              }),
+            {
+              retries: 2,
+              delayMs: 1000,
+              timeoutMs: 8000,
+            }
+          ),
         catch: (error) => {
           // Return the error directly for processing
           return Err(error);
diff --git a/apps/whispering/src/lib/services/transcription/cloud/openai.ts b/apps/whispering/src/lib/services/transcription/cloud/openai.ts
index 13ca7958f2..833b6305c4 100644
--- a/apps/whispering/src/lib/services/transcription/cloud/openai.ts
+++ b/apps/whispering/src/lib/services/transcription/cloud/openai.ts
@@ -2,6 +2,7 @@ import OpenAI from 'openai';
 import { Err, Ok, type Result, tryAsync, trySync } from 'wellcrafted/result';
 import { WhisperingErr, type WhisperingError } from '$lib/result';
 import { getExtensionFromAudioBlob } from '$lib/services/_utils';
+import { withRetry } from '$lib/services/completion/utils/withRetry';
 import type { Settings } from '$lib/settings';
 
 export const OPENAI_TRANSCRIPTION_MODELS = [
@@ -89,7 +90,7 @@ export function createOpenaiTranscriptionService() {
           `recording.${getExtensionFromAudioBlob(audioBlob)}`,
           { type: audioBlob.type },
         ),
-      catch: (error) =>
+      catch: (_error) =>
         WhisperingErr({
           title: '📁 File Creation Failed',
           description:
@@ -102,21 +103,29 @@
       // Call OpenAI API
       const { data: transcription, error: openaiApiError } = await tryAsync({
         try: () =>
-          new OpenAI({
-            apiKey: options.apiKey,
-            dangerouslyAllowBrowser: true,
-          }).audio.transcriptions.create({
-            file,
-            model: options.modelName,
-            language:
-              options.outputLanguage !== 'auto'
-                ? options.outputLanguage
-                : undefined,
-            prompt: options.prompt || undefined,
-            temperature: options.temperature
-              ? Number.parseFloat(options.temperature)
-              : undefined,
-          }),
+          withRetry(
+            () =>
+              new OpenAI({
+                apiKey: options.apiKey,
+                dangerouslyAllowBrowser: true,
+              }).audio.transcriptions.create({
+                file,
+                model: options.modelName,
+                language:
+                  options.outputLanguage !== 'auto'
+                    ? options.outputLanguage
+                    : undefined,
+                prompt: options.prompt || undefined,
+                temperature: options.temperature
+                  ? Number.parseFloat(options.temperature)
+                  : undefined,
+              }),
+            {
+              retries: 2,
+              delayMs: 1000,
+              timeoutMs: 8000,
+            },
+          ),
         catch: (error) => {
           // Check if it's NOT an OpenAI API error
           if (!(error instanceof OpenAI.APIError)) {
diff --git a/apps/whispering/svelte.config.js b/apps/whispering/svelte.config.js
index 901f8b6d8f..fcb780184f 100644
--- a/apps/whispering/svelte.config.js
+++ b/apps/whispering/svelte.config.js
@@ -1,7 +1,4 @@
-// Tauri doesn't have a Node.js server to do proper SSR
-// so we will use adapter-static to prerender the app (SSG)
-// This works for both Tauri and Cloudflare Workers + Assets
-// See: https://v2.tauri.app/start/frontend/sveltekit/ for more info
+import path from 'node:path';
 import staticAdapter from '@sveltejs/adapter-static';
 import { vitePreprocess } from '@sveltejs/vite-plugin-svelte';
 
@@ -9,21 +6,19 @@ /** @type {import('@sveltejs/kit').Config} */
 const config = {
   kit: {
     adapter: staticAdapter({
-      fallback: 'index.html', // SPA fallback for dynamic routes
+      fallback: 'index.html',
     }),
+    alias: {
+      $lib: path.resolve('./src/lib'),
+    },
   },
 
-  // Consult https://svelte.dev/docs/kit/integrations
-  // for more information about preprocessors
   preprocess: vitePreprocess(),
 
   vitePlugin: {
     inspector: {
       holdMode: true,
       showToggleButton: 'always',
-      // Using 'bottom-left' as base position, but CSS overrides in
-      // src/routes/+layout.svelte move it to bottom-center to avoid
-      // conflicts with devtools (bottom-left) and toasts (bottom-right)
       toggleButtonPos: 'bottom-left',
       toggleKeyCombo: 'meta-shift',
     },
diff --git a/bun.lock b/bun.lock
index d0b03a0816..0b8087ca97 100644
--- a/bun.lock
+++ b/bun.lock
@@ -1,11 +1,9 @@
 {
   "lockfileVersion": 1,
+  "configVersion": 0,
   "workspaces": {
     "": {
       "name": "epicenter",
-      "dependencies": {
-        "wellcrafted": "catalog:",
-      },
       "devDependencies": {
         "@biomejs/biome": "^2.3.1",
         "@eslint/compat": "^1.4.0",
@@ -148,7 +146,7 @@
     },
     "apps/whispering": {
       "name": "@repo/whispering",
-      "version": "7.7.0",
+      "version": "7.7.1",
       "dependencies": {
         "@anthropic-ai/sdk": "^0.55.0",
         "@aptabase/tauri": "^0.4.1",
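
Not part of the patches above: a minimal sketch of how withRetry's retry and timeout behavior could be exercised in a unit test. It assumes a bun:test-style runner (the repo ships a bun.lock; swap the import for 'vitest' if that is the actual test runner) and a hypothetical spec file placed next to withRetry.ts so the relative import resolves without the $lib alias.

```typescript
import { describe, expect, test } from 'bun:test'; // or 'vitest'

import { withRetry } from './withRetry';

describe('withRetry', () => {
  test('retries until the wrapped call succeeds', async () => {
    let attempts = 0;
    const result = await withRetry(
      async () => {
        attempts++;
        if (attempts < 3) throw new Error('transient failure');
        return 'ok';
      },
      { retries: 3, delayMs: 1, timeoutMs: 100 },
    );
    expect(result).toBe('ok');
    expect(attempts).toBe(3);
  });

  test('rethrows once every attempt has failed', async () => {
    let attempts = 0;
    await expect(
      withRetry(
        async () => {
          attempts++;
          throw new Error('always fails');
        },
        { retries: 2, delayMs: 1, timeoutMs: 100 },
      ),
    ).rejects.toThrow('always fails');
    expect(attempts).toBe(3); // initial attempt + 2 retries
  });

  test('treats a hung attempt as a failure via the timeout', async () => {
    await expect(
      withRetry(
        () => new Promise<string>(() => {}), // never settles
        { retries: 0, delayMs: 1, timeoutMs: 20 },
      ),
    ).rejects.toThrow('Timeout');
  });
});
```

One behavior the sketch makes visible: because the timeout only rejects the Promise.race, a timed-out attempt leaves the underlying call running; callers that need true cancellation would have to wire up an AbortSignal themselves.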