diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..5d0b448
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,15 @@
+# Server configuration
+HOST=localhost
+PORT=7000
+
+# SSL configuration
+VERIFY_SSL=true
+
+# Proxy configuration
+USE_PROXY=true
+PROXY_API_URL=https://proxy.scdn.io/api/get_proxy.php
+PROXY_PROTOCOL=http
+PROXY_BATCH_SIZE=5
+
+# Queue configuration
+MAX_QUEUE_SIZE=100
\ No newline at end of file
diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml
new file mode 100644
index 0000000..835ca75
--- /dev/null
+++ b/.github/workflows/docker-build.yml
@@ -0,0 +1,55 @@
+name: Docker Build and Push
+
+on:
+ release:
+ types: [published]
+
+jobs:
+ build-and-push:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ with:
+ driver: docker-container
+
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Extract metadata
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: |
+ ${{ secrets.DOCKERHUB_USERNAME }}/ttsfm
+ tags: |
+ type=ref,event=tag
+ type=raw,value=latest
+ labels: |
+ org.opencontainers.image.source=${{ github.repositoryUrl }}
+
+ - name: Build and push
+ id: build-and-push
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ platforms: linux/amd64,linux/arm64
+ push: true
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+
+ - name: Show image info
+ run: |
+ echo "Pushed tags: ${{ steps.meta.outputs.tags }}"
+ echo "Image digest: ${{ steps.build-and-push.outputs.digest }}"
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e0e62c9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,36 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Virtual Environment
+venv/
+env/
+ENV/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# OS
+.DS_Store
+Thumbs.db
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index 0ac99a0..f345c5f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -10,15 +10,25 @@ COPY requirements.txt .
# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt
-# Copy the application files
-COPY server.py .
-COPY index.html .
-COPY index_zh.html .
-COPY script.js .
-COPY styles.css .
+# Copy all application directories and files
+COPY main.py .
+COPY server/ server/
+COPY proxy/ proxy/
+COPY utils/ utils/
+COPY static/ static/
+
+# Set default environment variables
+ENV HOST=0.0.0.0 \
+ PORT=7000 \
+ VERIFY_SSL=true \
+ USE_PROXY=true \
+ PROXY_API_URL=https://proxy.scdn.io/api/get_proxy.php \
+ PROXY_PROTOCOL=http \
+ PROXY_BATCH_SIZE=5 \
+ MAX_QUEUE_SIZE=100
# Expose port 7000
EXPOSE 7000
-# Command to run the application with host set to 0.0.0.0
-CMD ["python", "server.py", "--host", "0.0.0.0"]
\ No newline at end of file
+# Command to run the application
+CMD ["python", "main.py"]
\ No newline at end of file
diff --git a/README.md b/README.md
index 4f23738..0623514 100644
--- a/README.md
+++ b/README.md
@@ -1,90 +1,134 @@
# TTSFM
-[](https://hub.docker.com/r/dbcccc/ttsfm)
+[](https://hub.docker.com/r/dbcccc/ttsfm)
[](LICENSE)
+[](https://github.com/dbccccccc/ttsfm)
-[English](README.md) | [中文](README_CN.md)
+> ⚠️ **Disclaimer**
+> This project is for learning and testing purposes only. For production environments, please use [OpenAI's official TTS service](https://platform.openai.com/docs/guides/audio).
-TTSFM is a reverse-engineered API server that mirrors OpenAI's TTS service, providing a compatible interface for text-to-speech conversion with multiple voice options.
+English | [中文](README_CN.md)
-### Prerequisites
-- Python 3.8 or higher
-- pip (Python package manager)
-- OR Docker
+## 🌟 Project Introduction
-### Installation
+TTSFM is a reverse-engineered API server that is fully compatible with OpenAI's Text-to-Speech (TTS) interface.
-#### Option 1: Using Docker (Recommended)
-```bash
-docker pull dbcccc/ttsfm:latest
-docker run -p 7000:7000 dbcccc/ttsfm:latest
+> 🎮 Try it now: [Official Demo Site](https://ttsapi.fm)
+
+## 🏗️ Project Structure
+
+```text
+ttsfm/
+├── main.py # Application entry
+├── server/ # Core services
+│ ├── api.py # OpenAI-compatible API
+│ └── handlers.py # Request handlers
+├── proxy/ # Proxy system
+│ └── manager.py
+├── utils/ # Utility modules
+│ └── config.py
+├── static/ # Frontend resources
+│ ├── index.html # English interface
+│ ├── index_zh.html # Chinese interface
+│ └── ... # JS/CSS resources
+└── requirements.txt # Python dependencies
```
-#### Option 2: Manual Installation
-1. Clone the repository:
+## 🚀 Quick Start
+
+### System Requirements
+- Python ≥ 3.8
+- Or Docker environment
+
+### 🐳 Docker Run (Recommended)
+
+Basic usage:
```bash
-git clone https://github.com/yourusername/ttsfm.git
-cd ttsfm
+docker run -p 7000:7000 dbcccc/ttsfm:latest
```
-2. Install dependencies:
+Custom configuration using environment variables:
```bash
-pip install -r requirements.txt
+docker run -d \
+ -p 7000:7000 \
+ -e HOST=0.0.0.0 \
+ -e PORT=7000 \
+ -e VERIFY_SSL=true \
+ -e USE_PROXY=false \
+ -e PROXY_API_URL=https://proxy.scdn.io/api/get_proxy.php \
+ -e PROXY_PROTOCOL=http \
+ -e PROXY_BATCH_SIZE=5 \
+ -e MAX_QUEUE_SIZE=100 \
+ dbcccc/ttsfm:latest
```
-### Usage
+Available environment variables:
+- `HOST`: Server host (default: 0.0.0.0)
+- `PORT`: Server port (default: 7000)
+- `VERIFY_SSL`: Whether to verify SSL certificates (default: true)
+- `USE_PROXY`: Whether to use proxy pool (default: true)
+- `PROXY_API_URL`: Proxy API URL (default: https://proxy.scdn.io/api/get_proxy.php)
+- `PROXY_PROTOCOL`: Proxy protocol (default: http)
+- `PROXY_BATCH_SIZE`: Number of proxies to fetch at once (default: 5)
+- `MAX_QUEUE_SIZE`: Maximum number of tasks in queue (default: 100)
+
+> 💡 **Tip**
+> macOS users experiencing port conflicts can use alternative ports:
+> `docker run -p 5051:7000 dbcccc/ttsfm:latest`
-#### Option 1: Using Docker
-1. The server will start automatically after running the docker command
-2. Access the web interface at `http://localhost:7000`
+### 📦 Manual Installation
-#### Option 2: Manual Usage
-1. Start the server:
+1. Download the latest release package from [GitHub Releases](https://github.com/dbccccccc/ttsfm/releases)
+2. Extract and enter the directory:
```bash
-python server.py
+tar -zxvf ttsfm-vX.X.X.tar.gz
+cd ttsfm-vX.X.X
+```
+3. Install dependencies and launch:
+```bash
+pip install -r requirements.txt
+cp .env.example .env # Edit config as needed
+python main.py
```
-2. Access the web interface at `http://localhost:7000`
+## 📚 Usage Guide
-3. Use the API endpoint
+### Web Interface
+Access `http://localhost:7000` to experience the interactive demo
### API Endpoints
-Please refer to the deployed webpage for detailed information.
-- `POST /v1/audio/speech`: Convert text to speech
-- `GET /v1/voices`: List available voices
+| Endpoint | Method | Description |
+|------|------|-------------|
+| `/v1/audio/speech` | POST | Text-to-Speech |
+| `/api/queue-size` | GET | Query task queue |
-### Pressure Testing
-The project includes a pressure test script to evaluate server performance under load. To use it:
+> 🔍 Complete API documentation is available via the web interface after local deployment
+### 🧪 Stress Testing
```bash
-# Basic test (10 requests, 2 concurrent connections)
+# Basic test
python pressure_test.py
-# Test with more requests and higher concurrency
-python pressure_test.py -n 50 -c 10
+# Custom test example
+python pressure_test.py -n 50 -c 10 -t long -s
+```
-# Test with different text lengths
-python pressure_test.py -t short # Short text
-python pressure_test.py -t medium # Medium text (default)
-python pressure_test.py -t long # Long text
+**Parameter Explanation**:
+- `-n` Total requests
+- `-c` Concurrency count
+- `-t` Text length (short/medium/long)
+- `-s` Save generated audio
-# Save generated audio files
-python pressure_test.py -s
+## 🤝 Contributing
-# Custom server URL
-python pressure_test.py -u http://localhost:7000
-```
+We welcome all forms of contributions! You can participate by:
-Options:
-- `-n, --num-requests`: Total number of requests to send (default: 10)
-- `-c, --concurrency`: Number of concurrent connections (default: 2)
-- `-t, --text-length`: Length of text to use (short/medium/long)
-- `-s, --save-audio`: Save generated audio files to test_output directory
-- `-u, --url`: Custom server URL (default: http://localhost:7000)
+- Submitting [Issues](https://github.com/dbccccccc/ttsfm/issues) to report problems
+- Creating [Pull Requests](https://github.com/dbccccccc/ttsfm/pulls) to improve code
+- Sharing usage experiences and suggestions
-### License
-This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+📜 Project licensed under [MIT License](LICENSE)
-## Star History
+## 📈 Project Activity
-[](https://www.star-history.com/#dbccccccc/ttsfm&Date)
\ No newline at end of file
+[](https://star-history.com/#dbccccccc/ttsfm&Date)
\ No newline at end of file
diff --git a/README_CN.md b/README_CN.md
index d03c02b..351f18a 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -1,90 +1,135 @@
# TTSFM
-[](https://hub.docker.com/r/dbcccc/ttsfm)
+[](https://hub.docker.com/r/dbcccc/ttsfm)
[](LICENSE)
+[](https://github.com/dbccccccc/ttsfm)
-[English](../README.md) | 中文
+> ⚠️ **免责声明**
+> 此项目仅用于学习测试,生产环境请使用 [OpenAI 官方 TTS 服务](https://platform.openai.com/docs/guides/audio)。
-TTSFM 是一个逆向工程的 API 服务器,镜像了 OpenAI 的 TTS 服务,提供了兼容的文本转语音接口,支持多种语音选项。
+[English](README.md) | 中文
+
+## 🌟 项目简介
+
+TTSFM 是一个逆向工程实现的 API 服务器,完全兼容 OpenAI 的文本转语音(TTS)接口。
+
+> 🎮 立即体验:[官方演示站](https://ttsapi.fm)
+
+
+## 🏗️ 项目结构
+
+```text
+ttsfm/
+├── main.py # 应用入口
+├── server/ # 服务核心
+│ ├── api.py # OpenAI 兼容API
+│ └── handlers.py # 请求处理器
+├── proxy/ # 代理系统
+│ └── manager.py
+├── utils/ # 工具模块
+│ └── config.py
+├── static/ # 前端资源
+│ ├── index.html # 英文界面
+│ ├── index_zh.html # 中文界面
+│ └── ... # JS/CSS 资源
+└── requirements.txt # Python依赖
+```
+
+## 🚀 快速开始
### 系统要求
-- Python 3.8 或更高版本
-- pip(Python 包管理器)
-- 或 Docker
+- Python ≥ 3.8
+- 或 Docker 环境
-### 安装步骤
+### 🐳 Docker 运行(推荐)
-#### 选项一:使用 Docker(推荐)
+基本用法:
```bash
-docker pull dbcccc/ttsfm:latest
docker run -p 7000:7000 dbcccc/ttsfm:latest
```
-#### 选项二:手动安装
-1. 克隆仓库:
+使用环境变量自定义配置:
```bash
-git clone https://github.com/yourusername/ttsfm.git
-cd ttsfm
+docker run -d \
+ -p 7000:7000 \
+ -e HOST=0.0.0.0 \
+ -e PORT=7000 \
+ -e VERIFY_SSL=true \
+ -e USE_PROXY=false \
+ -e PROXY_API_URL=https://proxy.scdn.io/api/get_proxy.php \
+ -e PROXY_PROTOCOL=http \
+ -e PROXY_BATCH_SIZE=5 \
+ -e MAX_QUEUE_SIZE=100 \
+ dbcccc/ttsfm:latest
```
-2. 安装依赖:
-```bash
-pip install -r requirements.txt
-```
+可用的环境变量:
+- `HOST`:服务器主机(默认:0.0.0.0)
+- `PORT`:服务器端口(默认:7000)
+- `VERIFY_SSL`:是否验证 SSL 证书(默认:true)
+- `USE_PROXY`:是否使用代理池(默认:true)
+- `PROXY_API_URL`:代理 API URL(默认:https://proxy.scdn.io/api/get_proxy.php)
+- `PROXY_PROTOCOL`:代理协议(默认:http)
+- `PROXY_BATCH_SIZE`:一次获取的代理数量(默认:5)
+- `MAX_QUEUE_SIZE`:队列最大任务数(默认:100)
-### 使用方法
+> 💡 **提示**
+> macOS 用户若遇到端口冲突,可替换端口号:
+> `docker run -p 5051:7000 dbcccc/ttsfm:latest`
-#### 选项一:使用 Docker
-1. 运行 docker 命令后服务器将自动启动
-2. 访问网页界面:`http://localhost:7000`
+### 📦 手动安装
-#### 选项二:手动使用
-1. 启动服务器:
+1. 从 [GitHub Releases](https://github.com/dbccccccc/ttsfm/releases) 下载最新版本压缩包
+2. 解压并进入目录:
+```bash
+tar -zxvf ttsfm-vX.X.X.tar.gz
+cd ttsfm-vX.X.X
+```
+3. 安装依赖并启动:
```bash
-python server.py
+pip install -r requirements.txt
+cp .env.example .env # 按需编辑配置
+python main.py
```
-2. 访问网页界面:`http://localhost:7000`
+## 📚 使用指南
-3. 使用 API 接口
+### Web 界面
+访问 `http://localhost:7000` 体验交互式演示
-### API 接口
-具体信息请至部署完成的网页查看。
-- `POST /v1/audio/speech`:文本转语音
-- `GET /v1/voices`:获取可用语音列表
+### API 端点
+| 端点 | 方法 | 描述 |
+|------|------|-------------|
+| `/v1/audio/speech` | POST | 文本转语音 |
+| `/api/queue-size` | GET | 查询任务队列 |
-### 压力测试
-项目包含一个压力测试脚本,用于评估服务器在负载下的性能。使用方法:
+> 🔍 完整 API 文档可在本地部署后通过 Web 界面查看
+### 🧪 压力测试
```bash
-# 基础测试(10个请求,2个并发连接)
+# 基础测试
python pressure_test.py
-# 更多请求和更高并发测试
-python pressure_test.py -n 50 -c 10
+# 自定义测试示例
+python pressure_test.py -n 50 -c 10 -t long -s
+```
-# 不同文本长度测试
-python pressure_test.py -t short # 短文本
-python pressure_test.py -t medium # 中等文本(默认)
-python pressure_test.py -t long # 长文本
+**参数说明**:
+- `-n` 总请求数
+- `-c` 并发数
+- `-t` 文本长度 (short/medium/long)
+- `-s` 保存生成音频
-# 保存生成的音频文件
-python pressure_test.py -s
+## 🤝 参与贡献
-# 自定义服务器地址
-python pressure_test.py -u http://localhost:7000
-```
+我们欢迎所有形式的贡献!您可以通过以下方式参与:
-选项说明:
-- `-n, --num-requests`:发送的总请求数(默认:10)
-- `-c, --concurrency`:并发连接数(默认:2)
-- `-t, --text-length`:使用的文本长度(short/medium/long)
-- `-s, --save-audio`:将生成的音频文件保存到 test_output 目录
-- `-u, --url`:自定义服务器地址(默认:http://localhost:7000)
+- 提交 [Issue](https://github.com/dbccccccc/ttsfm/issues) 报告问题
+- 发起 [Pull Request](https://github.com/dbccccccc/ttsfm/pulls) 改进代码
+- 分享使用体验和建议
-### 许可证
-本项目采用 MIT 许可证 - 详见 [LICENSE](LICENSE) 文件。
+📜 项目采用 [MIT 许可证](LICENSE)
-## Star History
+## 📈 项目动态
-[](https://www.star-history.com/#dbccccccc/ttsfm&Date)
\ No newline at end of file
+[](https://star-history.com/#dbccccccc/ttsfm&Date)
\ No newline at end of file
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..b773448
--- /dev/null
+++ b/main.py
@@ -0,0 +1,67 @@
+"""
+OpenAI TTS API Server
+
+This module provides a server that's compatible with OpenAI's TTS API format.
+This is the main entry point for the application.
+"""
+
+import asyncio
+import aiohttp
+import logging
+import ssl
+import time
+
+from utils.config import load_config, test_connection
+from server.api import TTSServer
+
+# Configure logging
+logging.basicConfig(
+ level=logging.INFO,
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+async def main():
+ """Main function to start the server."""
+ args = load_config()
+
+ # Test connection mode
+ if args.test_connection:
+ if args.no_verify_ssl:
+ connector = aiohttp.TCPConnector(ssl=False)
+ session = aiohttp.ClientSession(connector=connector)
+ else:
+ session = aiohttp.ClientSession()
+
+ try:
+ await test_connection(session)
+ finally:
+ await session.close()
+
+ logger.info("Connection test completed")
+ return
+
+ # Start the server
+ server = TTSServer(
+ args.host,
+ args.port,
+ verify_ssl=not args.no_verify_ssl,
+ use_proxy=args.use_proxy,
+ proxy_api=args.proxy_api,
+ proxy_protocol=args.proxy_protocol,
+ proxy_batch_size=args.proxy_batch_size,
+ max_queue_size=args.max_queue_size
+ )
+
+ await server.start()
+
+ try:
+ # Keep the server running
+ while True:
+ await asyncio.sleep(1)
+ except KeyboardInterrupt:
+ await server.stop()
+ logger.info("TTS server stopped")
+
+if __name__ == "__main__":
+ asyncio.run(main())
\ No newline at end of file
diff --git a/proxy/__init__.py b/proxy/__init__.py
new file mode 100644
index 0000000..bb5c4f4
--- /dev/null
+++ b/proxy/__init__.py
@@ -0,0 +1,5 @@
+"""
+Proxy Package
+
+This package contains proxy management functionality for IP rotation.
+"""
\ No newline at end of file
diff --git a/proxy/manager.py b/proxy/manager.py
new file mode 100644
index 0000000..4192e70
--- /dev/null
+++ b/proxy/manager.py
@@ -0,0 +1,104 @@
+"""
+Proxy Manager Module
+
+This module provides functionality for managing a pool of proxies for rotating IP addresses.
+"""
+
+import asyncio
+import aiohttp
+import logging
+import random
+from typing import Optional
+
+# Configure logging
+logging.basicConfig(
+ level=logging.INFO,
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+class ProxyManager:
+ """Manages a pool of proxies for rotating IP addresses."""
+
+ def __init__(self, api_url: str = "https://proxy.scdn.io/api/get_proxy.php",
+ protocol: str = "http", batch_size: int = 5):
+ """Initialize the proxy manager.
+
+ Args:
+ api_url: URL of the proxy pool API
+ protocol: Proxy protocol to use (http, https, socks4, socks5, all)
+ batch_size: Number of proxies to fetch at once
+ """
+ self.api_url = api_url
+ self.protocol = protocol
+ self.batch_size = batch_size
+ self.proxies = []
+ self.session = None
+ self.lock = asyncio.Lock()
+
+ async def initialize(self, session: aiohttp.ClientSession):
+ """Initialize the proxy manager with a session.
+
+ Args:
+ session: aiohttp client session to use for requests
+ """
+ self.session = session
+ await self.refresh_proxies()
+
+ async def refresh_proxies(self) -> bool:
+ """Fetch new proxies from the API.
+
+ Returns:
+ bool: True if successful, False otherwise
+ """
+ if not self.session:
+ logger.error("Session not initialized for proxy manager")
+ return False
+
+ try:
+ params = {
+ 'protocol': self.protocol,
+ 'count': self.batch_size
+ }
+
+ async with self.session.get(self.api_url, params=params) as response:
+ if response.status != 200:
+ logger.error(f"Failed to fetch proxies: HTTP {response.status}")
+ return False
+
+ data = await response.json()
+
+ if data.get('code') != 200 or 'data' not in data:
+ logger.error(f"Invalid response from proxy API: {data}")
+ return False
+
+ async with self.lock:
+ self.proxies = data['data']['proxies']
+ logger.info(f"Refreshed proxy pool with {len(self.proxies)} proxies")
+ return True
+
+ except Exception as e:
+ logger.error(f"Error refreshing proxies: {str(e)}")
+ return False
+
+ async def get_proxy(self) -> Optional[str]:
+ """Get a proxy from the pool.
+
+ Returns:
+ str: Proxy URL or None if no proxies available
+ """
+ async with self.lock:
+ if not self.proxies:
+ await self.refresh_proxies()
+ if not self.proxies:
+ return None
+
+ # Get and remove a random proxy
+ proxy = random.choice(self.proxies)
+ self.proxies.remove(proxy)
+
+ # Trigger refresh if running low
+ if len(self.proxies) <= 1:
+ asyncio.create_task(self.refresh_proxies())
+
+ return f"{self.protocol}://{proxy}"
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index eaea551..dc98440 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
-requests==2.31.0
aiohttp>=3.8.0
-python-dotenv>=1.0.0
\ No newline at end of file
+python-dotenv>=1.0.0
+urllib3>=2.0.0
\ No newline at end of file
diff --git a/server.py b/server.py
deleted file mode 100644
index ff4d49d..0000000
--- a/server.py
+++ /dev/null
@@ -1,535 +0,0 @@
-"""
-OpenAI TTS API Server
-
-This module provides a server that's compatible with OpenAI's TTS API format.
-"""
-
-import asyncio
-import aiohttp
-from aiohttp import web
-import logging
-from typing import Optional, Dict, Any, List
-from pathlib import Path
-import json
-import time
-import ssl
-import random
-
-# Configure logging
-logging.basicConfig(
- level=logging.INFO,
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
-)
-logger = logging.getLogger(__name__)
-
-class ProxyManager:
- """Manages a pool of proxies for rotating IP addresses."""
-
- def __init__(self, api_url: str = "https://proxy.scdn.io/api/get_proxy.php",
- protocol: str = "http", batch_size: int = 5):
- """Initialize the proxy manager.
-
- Args:
- api_url: URL of the proxy pool API
- protocol: Proxy protocol to use (http, https, socks4, socks5, all)
- batch_size: Number of proxies to fetch at once
- """
- self.api_url = api_url
- self.protocol = protocol
- self.batch_size = batch_size
- self.proxies = []
- self.session = None
- self.lock = asyncio.Lock()
-
- async def initialize(self, session: aiohttp.ClientSession):
- """Initialize the proxy manager with a session.
-
- Args:
- session: aiohttp client session to use for requests
- """
- self.session = session
- await self.refresh_proxies()
-
- async def refresh_proxies(self) -> bool:
- """Fetch new proxies from the API.
-
- Returns:
- bool: True if successful, False otherwise
- """
- if not self.session:
- logger.error("Session not initialized for proxy manager")
- return False
-
- try:
- params = {
- 'protocol': self.protocol,
- 'count': self.batch_size
- }
-
- async with self.session.get(self.api_url, params=params) as response:
- if response.status != 200:
- logger.error(f"Failed to fetch proxies: HTTP {response.status}")
- return False
-
- data = await response.json()
-
- if data.get('code') != 200 or 'data' not in data:
- logger.error(f"Invalid response from proxy API: {data}")
- return False
-
- async with self.lock:
- self.proxies = data['data']['proxies']
- logger.info(f"Refreshed proxy pool with {len(self.proxies)} proxies")
- return True
-
- except Exception as e:
- logger.error(f"Error refreshing proxies: {str(e)}")
- return False
-
- async def get_proxy(self) -> Optional[str]:
- """Get a proxy from the pool.
-
- Returns:
- str: Proxy URL or None if no proxies available
- """
- async with self.lock:
- if not self.proxies:
- await self.refresh_proxies()
- if not self.proxies:
- return None
-
- # Get and remove a random proxy
- proxy = random.choice(self.proxies)
- self.proxies.remove(proxy)
-
- # Trigger refresh if running low
- if len(self.proxies) <= 1:
- asyncio.create_task(self.refresh_proxies())
-
- return f"{self.protocol}://{proxy}"
-
-class TTSServer:
- """Server that's compatible with OpenAI's TTS API."""
-
- def __init__(self, host: str = "localhost", port: int = 7000,
- max_queue_size: int = 100, verify_ssl: bool = True,
- use_proxy: bool = False):
- """Initialize the TTS server.
-
- Args:
- host: Host to bind to
- port: Port to bind to
- max_queue_size: Maximum number of tasks in queue
- verify_ssl: Whether to verify SSL certificates when connecting to external services
- use_proxy: Whether to use a proxy pool for requests
- """
- self.host = host
- self.port = port
- self.app = web.Application()
- self.verify_ssl = verify_ssl
- self.use_proxy = use_proxy
-
- # Initialize queue system
- self.queue = asyncio.Queue(maxsize=max_queue_size)
- self.current_task = None
- self.processing_lock = asyncio.Lock()
-
- # Initialize proxy manager if needed
- self.proxy_manager = ProxyManager() if use_proxy else None
-
- # OpenAI compatible endpoint
- self.app.router.add_post('/v1/audio/speech', self.handle_openai_speech)
- self.app.router.add_get('/api/queue-size', self.handle_queue_size)
- self.app.router.add_get('/{tail:.*}', self.handle_static)
- self.session = None
-
- async def start(self):
- """Start the TTS server."""
- # Configure SSL context
- if not self.verify_ssl:
- ssl_context = ssl.create_default_context()
- ssl_context.check_hostname = False
- ssl_context.verify_mode = ssl.CERT_NONE
- logger.warning("SSL certificate verification disabled. This is insecure and should only be used for testing.")
- connector = aiohttp.TCPConnector(ssl=False)
- self.session = aiohttp.ClientSession(connector=connector)
- logger.info("Created aiohttp session with SSL verification disabled")
- else:
- self.session = aiohttp.ClientSession()
- logger.info("Created aiohttp session with default SSL settings")
-
- # Initialize proxy manager if enabled
- if self.use_proxy and self.proxy_manager:
- await self.proxy_manager.initialize(self.session)
- logger.info("Initialized proxy manager")
-
- # Start the task processor
- asyncio.create_task(self.process_queue())
- runner = web.AppRunner(self.app)
- await runner.setup()
- site = web.TCPSite(runner, self.host, self.port)
- await site.start()
- logger.info(f"TTS server running at http://{self.host}:{self.port}")
- if not self.verify_ssl:
- logger.warning("Running with SSL verification disabled. Not recommended for production use.")
- if self.use_proxy:
- logger.info("Running with proxy pool enabled for IP rotation")
-
- async def stop(self):
- """Stop the TTS server."""
- if self.session:
- await self.session.close()
-
- async def process_queue(self):
- """Background task to process the queue."""
- while True:
- try:
- # Get next task from queue
- task_data = await self.queue.get()
-
- async with self.processing_lock:
- self.current_task = task_data
- try:
- # Process the task
- response = await self.process_tts_request(task_data)
- # Send response through the response future
- task_data['response_future'].set_result(response)
- except Exception as e:
- task_data['response_future'].set_exception(e)
- finally:
- self.current_task = None
- self.queue.task_done()
-
- except Exception as e:
- logger.error(f"Error processing queue: {str(e)}")
- await asyncio.sleep(1) # Prevent tight loop on persistent errors
-
- async def process_tts_request(self, task_data: Dict[str, Any]) -> web.Response:
- """Process a single TTS request."""
- max_retries = 3
- retry_count = 0
-
- while retry_count < max_retries:
- try:
- logger.info(f"Sending request to OpenAI.fm with data: {task_data['data']}")
-
- headers = {
- "Accept": "*/*",
- "Accept-Language": "en-US,en;q=0.9",
- "Origin": "https://www.openai.fm",
- "Referer": "https://www.openai.fm/",
- "Content-Type": "application/x-www-form-urlencoded"
- }
-
- # Get proxy if enabled
- proxy = None
- if self.use_proxy and self.proxy_manager:
- proxy = await self.proxy_manager.get_proxy()
- if proxy:
- logger.info(f"Using proxy: {proxy}")
- else:
- logger.warning("No proxy available, proceeding without proxy")
-
- request_kwargs = {
- "data": task_data['data'],
- "headers": headers
- }
-
- if proxy:
- request_kwargs["proxy"] = proxy
-
- async with self.session.post(
- "https://www.openai.fm/api/generate",
- **request_kwargs
- ) as response:
- if response.status == 403:
- logger.warning("Received 403 Forbidden from OpenAI.fm")
- if self.use_proxy and self.proxy_manager:
- logger.info("Rotating proxy and retrying")
- retry_count += 1
- await asyncio.sleep(1)
- continue
-
- audio_data = await response.read()
-
- if response.status != 200:
- logger.error(f"Error from OpenAI.fm: {response.status}")
- error_msg = f"Error from upstream service: {response.status}"
- return web.Response(
- text=json.dumps({"error": error_msg}),
- status=response.status,
- content_type="application/json"
- )
-
- return web.Response(
- body=audio_data,
- content_type=task_data['content_type'],
- headers={
- "Access-Control-Allow-Origin": "*",
- "Access-Control-Allow-Methods": "POST, OPTIONS",
- "Access-Control-Allow-Headers": "Content-Type, Authorization"
- }
- )
- except aiohttp.ClientProxyConnectionError:
- logger.warning(f"Proxy connection error, retrying with new proxy (attempt {retry_count+1}/{max_retries})")
- retry_count += 1
- await asyncio.sleep(1)
- except Exception as e:
- logger.error(f"Error processing TTS request: {str(e)}")
- return web.Response(
- text=json.dumps({"error": str(e)}),
- status=500,
- content_type="application/json"
- )
-
- # If we've exhausted retries
- logger.error("Exhausted retries for TTS request")
- return web.Response(
- text=json.dumps({"error": "Failed to process request after multiple retries"}),
- status=500,
- content_type="application/json"
- )
-
- async def handle_openai_speech(self, request: web.Request) -> web.Response:
- """Handle POST requests to /v1/audio/speech (OpenAI compatible API)."""
- try:
- # Check if queue is full
- if self.queue.full():
- return web.Response(
- text=json.dumps({
- "error": "Queue is full. Please try again later.",
- "queue_size": self.queue.qsize()
- }),
- status=429, # Too Many Requests
- content_type="application/json"
- )
-
- # Read JSON data
- body = await request.json()
-
- # Map OpenAI format to our internal format
- openai_fm_data = {}
- content_type = "audio/mpeg"
-
- # Required parameters
- if 'input' not in body or 'voice' not in body:
- return web.Response(
- text=json.dumps({"error": "Missing required parameters: input and voice"}),
- status=400,
- content_type="application/json"
- )
-
- openai_fm_data['input'] = body['input']
- openai_fm_data['voice'] = body['voice']
-
- # Map 'instructions' to 'prompt' if provided
- if 'instructions' in body:
- openai_fm_data['prompt'] = body['instructions']
-
- # Check for response_format
- if 'response_format' in body:
- format_mapping = {
- 'mp3': 'audio/mpeg',
- 'opus': 'audio/opus',
- 'aac': 'audio/aac',
- 'flac': 'audio/flac',
- 'wav': 'audio/wav',
- 'pcm': 'audio/pcm'
- }
- content_type = format_mapping.get(body['response_format'], 'audio/mpeg')
-
- # Create response future
- response_future = asyncio.Future()
-
- # Create task data
- task_data = {
- 'data': openai_fm_data,
- 'content_type': content_type,
- 'response_future': response_future,
- 'timestamp': time.time()
- }
-
- # Add to queue
- await self.queue.put(task_data)
- logger.info(f"Added task to queue. Current size: {self.queue.qsize()}")
-
- # Wait for response
- return await response_future
-
- except Exception as e:
- logger.error(f"Error handling request: {str(e)}")
- return web.Response(
- text=json.dumps({"error": str(e)}),
- status=500,
- content_type="application/json",
- headers={
- "Access-Control-Allow-Origin": "*",
- "Access-Control-Allow-Methods": "POST, OPTIONS",
- "Access-Control-Allow-Headers": "Content-Type, Authorization"
- }
- )
-
- async def handle_queue_size(self, request: web.Request) -> web.Response:
- """Handle GET requests to /api/queue-size."""
- return web.json_response({
- "queue_size": self.queue.qsize(),
- "max_queue_size": self.queue.maxsize
- }, headers={
- "Access-Control-Allow-Origin": "*",
- "Access-Control-Allow-Methods": "GET, OPTIONS",
- "Access-Control-Allow-Headers": "Content-Type"
- })
-
- async def handle_static(self, request: web.Request) -> web.Response:
- """Handle static file requests.
-
- Args:
- request: The incoming request
-
- Returns:
- web.Response: The response to send back
- """
- try:
- # Get file path from request
- file_path = request.match_info['tail']
- if not file_path:
- file_path = 'index.html'
-
- # Construct full path
- full_path = Path(__file__).parent / file_path
-
- # Check if file exists
- if not full_path.exists():
- return web.Response(text="Not found", status=404)
-
- # Read file
- with open(full_path, 'rb') as f:
- content = f.read()
-
- # Determine content type
- content_type = {
- '.html': 'text/html',
- '.css': 'text/css',
- '.js': 'application/javascript',
- '.png': 'image/png',
- '.jpg': 'image/jpeg',
- '.gif': 'image/gif',
- '.ico': 'image/x-icon'
- }.get(full_path.suffix, 'application/octet-stream')
-
- # Return response
- return web.Response(
- body=content,
- content_type=content_type,
- headers={
- "Access-Control-Allow-Origin": "*",
- "Access-Control-Allow-Methods": "GET, OPTIONS",
- "Access-Control-Allow-Headers": "Content-Type"
- }
- )
-
- except Exception as e:
- logger.error(f"Error serving static file: {str(e)}")
- return web.Response(text=str(e), status=500)
-
-async def run_server(host: str = "localhost", port: int = 7000, verify_ssl: bool = True, use_proxy: bool = False):
- """Run the TTS server.
-
- Args:
- host: Host to bind to
- port: Port to bind to
- verify_ssl: Whether to verify SSL certificates (disable only for testing)
- use_proxy: Whether to use a proxy pool for requests
- """
- server = TTSServer(host, port, verify_ssl=verify_ssl, use_proxy=use_proxy)
- await server.start()
-
- try:
- # Keep the server running
- while True:
- await asyncio.sleep(1)
- except KeyboardInterrupt:
- await server.stop()
- logger.info("TTS server stopped")
-
-if __name__ == "__main__":
- import argparse
-
- parser = argparse.ArgumentParser(description="Run the TTS API server")
- parser.add_argument("--host", type=str, default="localhost", help="Host to bind to")
- parser.add_argument("--port", type=int, default=7000, help="Port to bind to")
- parser.add_argument("--no-verify-ssl", action="store_true", help="Disable SSL certificate verification (insecure, use only for testing)")
- parser.add_argument("--use-proxy", action="store_true", help="Use proxy pool for IP rotation")
- parser.add_argument("--test-connection", action="store_true", help="Test connection to OpenAI.fm and exit")
-
- args = parser.parse_args()
-
- # If SSL verification is disabled, apply it globally
- if args.no_verify_ssl:
- import ssl
-
- # Disable SSL verification globally in Python
- ssl._create_default_https_context = ssl._create_unverified_context
- logger.warning("SSL certificate verification disabled GLOBALLY. This is insecure!")
-
- # Don't create connector here - it needs a running event loop
-
- # Test connection mode
- if args.test_connection:
- async def test_openai_fm():
- logger.info("Testing connection to OpenAI.fm...")
-
- if args.no_verify_ssl:
- connector = aiohttp.TCPConnector(ssl=False)
- session = aiohttp.ClientSession(connector=connector)
- logger.info("Using session with SSL verification disabled")
- else:
- session = aiohttp.ClientSession()
- logger.info("Using session with default SSL settings")
-
- try:
- logger.info("Sending GET request to OpenAI.fm homepage")
- async with session.get("https://www.openai.fm") as response:
- logger.info(f"Homepage status: {response.status}")
- if response.status == 200:
- logger.info("Successfully connected to OpenAI.fm homepage")
- else:
- logger.error(f"Failed to connect to OpenAI.fm homepage: {response.status}")
-
- logger.info("Testing API endpoint with minimal request")
- test_data = {"input": "Test", "voice": "alloy"}
- import urllib.parse
- url_encoded_data = urllib.parse.urlencode(test_data)
-
- async with session.post(
- "https://www.openai.fm/api/generate",
- data=url_encoded_data,
- headers={
- "Accept": "*/*",
- "Accept-Language": "en-US,en;q=0.9",
- "Origin": "https://www.openai.fm",
- "Referer": "https://www.openai.fm/",
- "Content-Type": "application/x-www-form-urlencoded"
- }
- ) as response:
- logger.info(f"API endpoint status: {response.status}")
- if response.status == 200:
- data = await response.read()
- logger.info(f"Successfully received {len(data)} bytes from API")
- else:
- text = await response.text()
- logger.error(f"API request failed: {response.status}, {text}")
-
- except Exception as e:
- logger.error(f"Connection test failed with error: {str(e)}")
- import traceback
- logger.error(traceback.format_exc())
- finally:
- await session.close()
-
- asyncio.run(test_openai_fm())
- logger.info("Connection test completed")
- exit(0)
-
- # Start the server
- asyncio.run(run_server(args.host, args.port, verify_ssl=not args.no_verify_ssl, use_proxy=args.use_proxy))
\ No newline at end of file
diff --git a/server/__init__.py b/server/__init__.py
new file mode 100644
index 0000000..4d4fc16
--- /dev/null
+++ b/server/__init__.py
@@ -0,0 +1,5 @@
+"""
+Server Package
+
+This package contains the TTS API server implementation.
+"""
\ No newline at end of file
diff --git a/server/api.py b/server/api.py
new file mode 100644
index 0000000..694e856
--- /dev/null
+++ b/server/api.py
@@ -0,0 +1,151 @@
+"""
+TTS API Server
+
+This module provides a server that's compatible with OpenAI's TTS API format.
+"""
+
+import asyncio
+import aiohttp
+import logging
+import ssl
+from aiohttp import web
+from typing import Optional
+
+from server.handlers import handle_openai_speech, handle_queue_size, handle_static, process_tts_request
+from proxy.manager import ProxyManager
+
+# Configure logging
+logging.basicConfig(
+ level=logging.INFO,
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
class TTSServer:
    """Server that's compatible with OpenAI's TTS API.

    Requests arrive on an OpenAI-style endpoint, are placed on an asyncio
    queue, and are processed one at a time by a background worker,
    optionally routing upstream calls through a rotating proxy pool.
    """

    def __init__(self, host: str = "localhost", port: int = 7000,
                 max_queue_size: int = 100, verify_ssl: bool = True,
                 use_proxy: bool = False, proxy_api: str = "https://proxy.scdn.io/api/get_proxy.php",
                 proxy_protocol: str = "http", proxy_batch_size: int = 5):
        """Initialize the TTS server.

        Args:
            host: Host to bind to
            port: Port to bind to
            max_queue_size: Maximum number of tasks in queue
            verify_ssl: Whether to verify SSL certificates when connecting to external services
            use_proxy: Whether to use a proxy pool for requests
            proxy_api: URL of the proxy pool API
            proxy_protocol: Proxy protocol to use (http, https, socks4, socks5, all)
            proxy_batch_size: Number of proxies to fetch at once
        """
        self.host = host
        self.port = port
        self.app = web.Application()
        self.verify_ssl = verify_ssl
        self.use_proxy = use_proxy

        # Initialize queue system
        self.queue = asyncio.Queue(maxsize=max_queue_size)
        self.current_task = None
        self.processing_lock = asyncio.Lock()

        # Proxy manager is only constructed when proxying is enabled.
        self.proxy_manager = ProxyManager(
            api_url=proxy_api,
            protocol=proxy_protocol,
            batch_size=proxy_batch_size
        ) if use_proxy else None

        # Set up routes
        self.setup_routes()

        # The aiohttp session and server plumbing are created in start(),
        # which must run inside the event loop.
        self.session = None
        self._queue_task = None  # background worker task; kept so it isn't GC'd
        self._runner = None      # web.AppRunner, kept so stop() can clean up

    def setup_routes(self):
        """Set up the API routes."""
        # OpenAI compatible endpoint
        self.app.router.add_post('/v1/audio/speech', self._handle_openai_speech)
        self.app.router.add_get('/api/queue-size', self._handle_queue_size)
        # Catch-all for static assets (index.html, scripts, styles).
        self.app.router.add_get('/{tail:.*}', handle_static)

    async def _handle_openai_speech(self, request):
        """Handler for OpenAI speech endpoint."""
        return await handle_openai_speech(
            request,
            self.queue,
            proxy_manager=self.proxy_manager,
            session=self.session,
            use_proxy=self.use_proxy
        )

    async def _handle_queue_size(self, request):
        """Handler for queue size endpoint."""
        return await handle_queue_size(request, self.queue)

    async def start(self):
        """Start the TTS server and its background queue worker."""
        # Configure the outbound HTTP session.
        if not self.verify_ssl:
            # Build a context that skips certificate validation and pass it
            # to the connector. (Previously the context was built but never
            # used; the connector was given ssl=False instead.)
            ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            logger.warning("SSL certificate verification disabled. This is insecure and should only be used for testing.")
            connector = aiohttp.TCPConnector(ssl=ssl_context)
            self.session = aiohttp.ClientSession(connector=connector)
            logger.info("Created aiohttp session with SSL verification disabled")
        else:
            self.session = aiohttp.ClientSession()
            logger.info("Created aiohttp session with default SSL settings")

        # Initialize proxy manager if enabled
        if self.use_proxy and self.proxy_manager:
            await self.proxy_manager.initialize(self.session)
            logger.info("Initialized proxy manager")

        # Keep a reference to the worker task so it cannot be garbage
        # collected mid-flight and can be cancelled in stop().
        self._queue_task = asyncio.create_task(self.process_queue())

        self._runner = web.AppRunner(self.app)
        await self._runner.setup()
        site = web.TCPSite(self._runner, self.host, self.port)
        await site.start()
        logger.info(f"TTS server running at http://{self.host}:{self.port}")
        if not self.verify_ssl:
            logger.warning("Running with SSL verification disabled. Not recommended for production use.")
        if self.use_proxy:
            logger.info("Running with proxy pool enabled for IP rotation")

    async def stop(self):
        """Stop the TTS server, its queue worker, and the HTTP session."""
        # Cancel the worker first so nothing touches the session while it
        # is being closed.
        if self._queue_task:
            self._queue_task.cancel()
            try:
                await self._queue_task
            except asyncio.CancelledError:
                pass
            self._queue_task = None
        if self._runner:
            await self._runner.cleanup()
            self._runner = None
        if self.session:
            await self.session.close()

    async def process_queue(self):
        """Background task to process the queue serially (FIFO)."""
        while True:
            try:
                # Block until a task is available.
                task_data = await self.queue.get()

                async with self.processing_lock:
                    self.current_task = task_data
                    try:
                        # Process the task
                        response = await process_tts_request(
                            task_data,
                            self.session,
                            proxy_manager=self.proxy_manager,
                            use_proxy=self.use_proxy
                        )
                        # Hand the response back to the waiting HTTP handler.
                        task_data['response_future'].set_result(response)
                    except Exception as e:
                        task_data['response_future'].set_exception(e)
                    finally:
                        self.current_task = None
                        self.queue.task_done()

            except asyncio.CancelledError:
                # Normal shutdown path triggered by stop().
                raise
            except Exception as e:
                logger.error(f"Error processing queue: {str(e)}")
                await asyncio.sleep(1)  # Prevent tight loop on persistent errors
\ No newline at end of file
diff --git a/server/handlers.py b/server/handlers.py
new file mode 100644
index 0000000..8627336
--- /dev/null
+++ b/server/handlers.py
@@ -0,0 +1,244 @@
+"""
+HTTP Request Handlers
+
+This module contains the API endpoint handlers for the TTS server.
+"""
+
+import json
+import time
+import logging
+import asyncio
+import aiohttp
+from aiohttp import web
+from pathlib import Path
+from typing import Dict, Any
+
+logger = logging.getLogger(__name__)
+
async def handle_openai_speech(request: web.Request, queue, proxy_manager=None, session=None, use_proxy=False) -> web.Response:
    """Handle POST requests to /v1/audio/speech (OpenAI compatible API).

    Validates the JSON body, maps OpenAI-style fields to the upstream
    openai.fm form fields, enqueues the task, and awaits the worker's
    response.

    Args:
        request: Incoming aiohttp request with a JSON body.
        queue: asyncio.Queue consumed by the background worker.
        proxy_manager: Unused here; proxying happens in the worker.
        session: Unused here; the worker owns the HTTP session.
        use_proxy: Unused here; kept for interface symmetry.

    Returns:
        web.Response: audio payload on success, JSON error otherwise.
    """
    # CORS headers are attached to every response (previously only the
    # 500 path had them) so browser clients can read error bodies too.
    cors_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "POST, OPTIONS",
        "Access-Control-Allow-Headers": "Content-Type, Authorization"
    }
    try:
        # Reject early when the queue is at capacity.
        if queue.full():
            return web.Response(
                text=json.dumps({
                    "error": "Queue is full. Please try again later.",
                    "queue_size": queue.qsize()
                }),
                status=429,  # Too Many Requests
                content_type="application/json",
                headers=cors_headers
            )

        # Read JSON data
        body = await request.json()

        # Map OpenAI format to our internal format
        openai_fm_data = {}
        content_type = "audio/mpeg"

        # Required parameters
        if 'input' not in body or 'voice' not in body:
            return web.Response(
                text=json.dumps({"error": "Missing required parameters: input and voice"}),
                status=400,
                content_type="application/json",
                headers=cors_headers
            )

        openai_fm_data['input'] = body['input']
        openai_fm_data['voice'] = body['voice']

        # Map 'instructions' to 'prompt' if provided
        if 'instructions' in body:
            openai_fm_data['prompt'] = body['instructions']

        # response_format only selects the Content-Type header; the
        # upstream service always returns MP3 audio.
        if 'response_format' in body:
            format_mapping = {
                'mp3': 'audio/mpeg',
                'opus': 'audio/opus',
                'aac': 'audio/aac',
                'flac': 'audio/flac',
                'wav': 'audio/wav',
                'pcm': 'audio/pcm'
            }
            content_type = format_mapping.get(body['response_format'], 'audio/mpeg')

        # Create the future on the running loop (preferred over the bare
        # asyncio.Future() constructor).
        response_future = asyncio.get_running_loop().create_future()

        # Create task data
        task_data = {
            'data': openai_fm_data,
            'content_type': content_type,
            'response_future': response_future,
            'timestamp': time.time()
        }

        # Add to queue
        await queue.put(task_data)
        logger.info(f"Added task to queue. Current size: {queue.qsize()}")

        # Wait for the worker to fulfil the future.
        return await response_future

    except Exception as e:
        logger.error(f"Error handling request: {str(e)}")
        return web.Response(
            text=json.dumps({"error": str(e)}),
            status=500,
            content_type="application/json",
            headers=cors_headers
        )
+
async def process_tts_request(task_data: Dict[str, Any], session, proxy_manager=None, use_proxy=False) -> web.Response:
    """Process a single TTS request against the openai.fm backend.

    Retries up to three times on a 403 (often IP-related) or on a proxy
    connection failure, rotating to a fresh proxy when a pool is
    available.

    Args:
        task_data: Dict with 'data' (form fields), 'content_type', and
            'response_future', built by handle_openai_speech.
        session: Shared aiohttp ClientSession owned by the server.
        proxy_manager: Optional proxy pool used when use_proxy is True.
        use_proxy: Whether to route the upstream request through a proxy.

    Returns:
        web.Response: audio body on success, JSON error otherwise.
    """
    max_retries = 3
    retry_count = 0

    while retry_count < max_retries:
        try:
            logger.info(f"Sending request to OpenAI.fm with data: {task_data['data']}")

            headers = {
                "Accept": "*/*",
                "Accept-Language": "en-US,en;q=0.9",
                "Origin": "https://www.openai.fm",
                "Referer": "https://www.openai.fm/",
                "Content-Type": "application/x-www-form-urlencoded"
            }

            # Get proxy if enabled
            proxy = None
            if use_proxy and proxy_manager:
                proxy = await proxy_manager.get_proxy()
                if proxy:
                    logger.info(f"Using proxy: {proxy}")
                else:
                    logger.warning("No proxy available, proceeding without proxy")

            request_kwargs = {
                "data": task_data['data'],
                "headers": headers
            }

            if proxy:
                request_kwargs["proxy"] = proxy

            async with session.post(
                "https://www.openai.fm/api/generate",
                **request_kwargs
            ) as response:
                if response.status == 403:
                    # Upstream blocks some IPs; retry, with a new proxy if
                    # a pool is available.
                    logger.warning("Received 403 Forbidden from OpenAI.fm")
                    if use_proxy and proxy_manager:
                        logger.info("Rotating proxy and retrying")
                    retry_count += 1
                    await asyncio.sleep(1)
                    continue

                if response.status != 200:
                    # Fail fast without downloading the (discarded) body
                    # of an error response.
                    logger.error(f"Error from OpenAI.fm: {response.status}")
                    error_msg = f"Error from upstream service: {response.status}"
                    return web.Response(
                        text=json.dumps({"error": error_msg}),
                        status=response.status,
                        content_type="application/json"
                    )

                # Success: read the audio payload only on a 200.
                audio_data = await response.read()

                return web.Response(
                    body=audio_data,
                    content_type=task_data['content_type'],
                    headers={
                        "Access-Control-Allow-Origin": "*",
                        "Access-Control-Allow-Methods": "POST, OPTIONS",
                        "Access-Control-Allow-Headers": "Content-Type, Authorization"
                    }
                )
        except aiohttp.ClientProxyConnectionError:
            logger.warning(f"Proxy connection error, retrying with new proxy (attempt {retry_count+1}/{max_retries})")
            retry_count += 1
            await asyncio.sleep(1)
        except Exception as e:
            logger.error(f"Error processing TTS request: {str(e)}")
            return web.Response(
                text=json.dumps({"error": str(e)}),
                status=500,
                content_type="application/json"
            )

    # If we've exhausted retries
    logger.error("Exhausted retries for TTS request")
    return web.Response(
        text=json.dumps({"error": "Failed to process request after multiple retries"}),
        status=500,
        content_type="application/json"
    )
+
async def handle_queue_size(request: web.Request, queue) -> web.Response:
    """Handle GET requests to /api/queue-size.

    Reports the current number of queued tasks and the queue's capacity
    as JSON, with permissive CORS headers for browser polling.
    """
    payload = {
        "queue_size": queue.qsize(),
        "max_queue_size": queue.maxsize
    }
    cors = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "GET, OPTIONS",
        "Access-Control-Allow-Headers": "Content-Type"
    }
    return web.json_response(payload, headers=cors)
+
async def handle_static(request: web.Request) -> web.Response:
    """Handle static file requests from the project's static directory.

    Args:
        request: The incoming request; the 'tail' match holds the path

    Returns:
        web.Response: file contents on success, 404 for missing files or
        paths escaping the static directory, 500 on unexpected errors
    """
    try:
        # Get file path from request
        file_path = request.match_info['tail']
        if not file_path:
            file_path = 'index.html'

        # Construct full path - look in static directory
        static_root = (Path(__file__).parent.parent / 'static').resolve()
        full_path = (static_root / file_path).resolve()

        # Security: reject paths that escape the static directory
        # (e.g. via '..' segments) instead of serving arbitrary files.
        try:
            full_path.relative_to(static_root)
        except ValueError:
            return web.Response(text="Not found", status=404)

        # Only serve regular files; a directory would make open() raise.
        if not full_path.is_file():
            return web.Response(text="Not found", status=404)

        # Read file
        content = full_path.read_bytes()

        # Determine content type
        content_type = {
            '.html': 'text/html',
            '.css': 'text/css',
            '.js': 'application/javascript',
            '.png': 'image/png',
            '.jpg': 'image/jpeg',
            '.gif': 'image/gif',
            '.ico': 'image/x-icon'
        }.get(full_path.suffix, 'application/octet-stream')

        # Return response
        return web.Response(
            body=content,
            content_type=content_type,
            headers={
                "Access-Control-Allow-Origin": "*",
                "Access-Control-Allow-Methods": "GET, OPTIONS",
                "Access-Control-Allow-Headers": "Content-Type"
            }
        )

    except Exception as e:
        logger.error(f"Error serving static file: {str(e)}")
        return web.Response(text=str(e), status=500)
\ No newline at end of file
diff --git a/index.html b/static/index.html
similarity index 92%
rename from index.html
rename to static/index.html
index 471d881..3554206 100644
--- a/index.html
+++ b/static/index.html
@@ -1,323 +1,344 @@
-
-
-
-
-
- OpenAI TTS API Documentation
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Text-to-Speech API with Multiple Voice Options
-
-
-
-
-
- Service Status
-
-
-
-
-
- Active Requests:
- 0
-
-
- Maximum Capacity:
- 100
-
-
-
-
No Load
-
-
-
-
-
-
-
-
-
-
-
- Quick Start
- Choose your preferred programming language to get started with the API:
-
-
-
-
-
import requests
-
-url = "https://ttsapi.site/v1/audio/speech"
-headers = {
- "Content-Type": "application/json"
-}
-data = {
- "input": "Hello, this is a test.",
- "voice": "alloy",
- "instructions": "Speak in a cheerful and upbeat tone." # Optional
-}
-
-response = requests.post(url, json=data, headers=headers)
-if response.status_code == 200:
- # Save the audio file - always MP3 format regardless of Content-Type header
- with open("output.mp3", "wb") as f:
- f.write(response.content)
- print("Audio saved as output.mp3")
-else:
- print(f"Error: {response.status_code}, {response.json()}")
-
-
-
-
-
-
async function generateSpeech() {
- const response = await fetch('https://ttsapi.site/v1/audio/speech', {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json'
- },
- body: JSON.stringify({
- input: 'Hello, this is a test.',
- voice: 'alloy',
- instructions: 'Speak in a cheerful and upbeat tone.' // Optional
- })
- });
-
- if (response.ok) {
- // Always MP3 format regardless of Content-Type header
- const blob = await response.blob();
- const audio = new Audio(URL.createObjectURL(blob));
- audio.play();
- } else {
- const error = await response.json();
- console.error('Error:', error);
- }
-}
-
-
-
-
-
- Available Voices
-
- alloy
- ash
- ballad
- coral
- echo
- fable
- onyx
- nova
- sage
- shimmer
- verse
-
-
-
-
-
- API Reference
-
-
Generate Speech (OpenAI Compatible)
-
POST /v1/audio/speech
-
-
-
Request Parameters
-
-
- Parameter
- Type
- Required
- Description
-
-
- input
- string
- Yes
- The text to convert to speech
-
-
- voice
- string
- Yes
- The voice to use (see Available Voices)
-
-
- instructions
- string
- No
- Mapped to "prompt" parameter when sent to the backend service. Can be used to guide voice emotion or style.
-
-
- response_format
- string
- No
- OpenAI compatibility only - only changes the Content-Type header but not the actual audio format. May result in incorrect Content-Type headers. Audio is always MP3.
-
-
- model
- string
- No
- OpenAI compatibility only - completely ignored.
-
-
- speed
- number
- No
- OpenAI compatibility only - completely ignored.
-
-
-
-
-
Note: Parameters in gray are completely ignored by the service or may cause misleading behavior. Only input, voice, and instructions affect the actual TTS output.
-
-
-
-
-
How the Instructions Parameter Works
-
The instructions parameter is mapped to a prompt parameter when sent to the backend service. It can be used to guide the voice emotion, tone, or style. Some examples of effective instructions:
-
-
- Emotional guidance: "Speak in a happy and excited tone."
- Character impersonation: "Speak like a wise old wizard."
- Contextual hints: "This is being read to a child, speak gently."
- Reading style: "Read this as a news broadcast."
-
-
-
-
Tip: Keep instructions clear and concise. Overly complex instructions may not be interpreted correctly.
-
-
-
-
Response Format
-
The API always returns a binary MP3 audio file with the following headers:
-
- Content-Type: "audio/mpeg"
- Access-Control-Allow-Origin: "*" (CORS enabled)
-
-
-
-
Important: While the response_format parameter may change the Content-Type header in the response, it does not actually convert the audio format. The audio is always returned as MP3 from the upstream service.
-
-
-
Error Responses
-
-
- Status Code
- Description
-
-
- 400
- Missing required parameters (input or voice)
-
-
- 429
- Queue is full, try again later
-
-
- 500
- Internal server error
-
-
-
-
-
-
-
-
Queue System
-
The API uses a queue system to handle multiple requests efficiently:
-
- Maximum queue size: 100 requests
- Requests are processed in FIFO (First In, First Out) order
- Queue status can be monitored via the /api/queue-size endpoint
-
-
-
Queue Status Endpoint
-
GET /api/queue-size
-
Returns JSON with queue information:
-
{
- "queue_size": 0,
- "max_queue_size": 100
-}
-
-
-
-
-
-
-
-
+
+
+
+
+
+ ttsfm
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Text-to-Speech API with Multiple Voice Options
+
+
+
+
+
+
+
+
+ Service Status
+
+
+
+
+
+ Active Requests:
+ 0
+
+
+ Maximum Capacity:
+ 100
+
+
+
+
No Load
+
+
+
+
+
+
+
+
+
+
+
+ Quick Start
+ Choose your preferred programming language to get started with the API:
+
+
+
+
+
import requests
+
+url = "https://ttsapi.site/v1/audio/speech"
+headers = {
+ "Content-Type": "application/json"
+}
+data = {
+ "input": "Hello, this is a test.",
+ "voice": "alloy",
+ "instructions": "Speak in a cheerful and upbeat tone." # Optional
+}
+
+response = requests.post(url, json=data, headers=headers)
+if response.status_code == 200:
+ # Save the audio file - always MP3 format regardless of Content-Type header
+ with open("output.mp3", "wb") as f:
+ f.write(response.content)
+ print("Audio saved as output.mp3")
+else:
+ print(f"Error: {response.status_code}, {response.json()}")
+
+
+
+
+
+
async function generateSpeech() {
+ const response = await fetch('https://ttsapi.site/v1/audio/speech', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({
+ input: 'Hello, this is a test.',
+ voice: 'alloy',
+ instructions: 'Speak in a cheerful and upbeat tone.' // Optional
+ })
+ });
+
+ if (response.ok) {
+ // Always MP3 format regardless of Content-Type header
+ const blob = await response.blob();
+ const audio = new Audio(URL.createObjectURL(blob));
+ audio.play();
+ } else {
+ const error = await response.json();
+ console.error('Error:', error);
+ }
+}
+
+
+
+
+
+ Available Voices
+
+ alloy
+ ash
+ ballad
+ coral
+ echo
+ fable
+ onyx
+ nova
+ sage
+ shimmer
+ verse
+
+
+
+
+
+ API Reference
+
+
Generate Speech (OpenAI Compatible)
+
POST /v1/audio/speech
+
+
+
Request Parameters
+
+
+ Parameter
+ Type
+ Required
+ Description
+
+
+ input
+ string
+ Yes
+ The text to convert to speech
+
+
+ voice
+ string
+ Yes
+ The voice to use (see Available Voices)
+
+
+ instructions
+ string
+ No
+ Mapped to "prompt" parameter when sent to the backend service. Can be used to guide voice emotion or style.
+
+
+ response_format
+ string
+ No
+ OpenAI compatibility only - only changes the Content-Type header but not the actual audio format. May result in incorrect Content-Type headers. Audio is always MP3.
+
+
+ model
+ string
+ No
+ OpenAI compatibility only - completely ignored.
+
+
+ speed
+ number
+ No
+ OpenAI compatibility only - completely ignored.
+
+
+
+
+
Note: Parameters in gray are completely ignored by the service or may cause misleading behavior. Only input, voice, and instructions affect the actual TTS output.
+
+
+
+
+
How the Instructions Parameter Works
+
The instructions parameter is mapped to a prompt parameter when sent to the backend service. It can be used to guide the voice emotion, tone, or style. Some examples of effective instructions:
+
+
+ Emotional guidance: "Speak in a happy and excited tone."
+ Character impersonation: "Speak like a wise old wizard."
+ Contextual hints: "This is being read to a child, speak gently."
+ Reading style: "Read this as a news broadcast."
+
+
+
+
Tip: Keep instructions clear and concise. Overly complex instructions may not be interpreted correctly.
+
+
+
+
Response Format
+
The API always returns a binary MP3 audio file with the following headers:
+
+ Content-Type: "audio/mpeg"
+ Access-Control-Allow-Origin: "*" (CORS enabled)
+
+
+
+
Important: While the response_format parameter may change the Content-Type header in the response, it does not actually convert the audio format. The audio is always returned as MP3 from the upstream service.
+
+
+
Error Responses
+
+
+ Status Code
+ Description
+
+
+ 400
+ Missing required parameters (input or voice)
+
+
+ 429
+ Queue is full, try again later
+
+
+ 500
+ Internal server error
+
+
+
+
+
+
+
+
Queue System
+
The API uses a queue system to handle multiple requests efficiently:
+
+ Maximum queue size: 100 requests
+ Requests are processed in FIFO (First In, First Out) order
+ Queue status can be monitored via the /api/queue-size endpoint
+
+
+
Queue Status Endpoint
+
GET /api/queue-size
+
Returns JSON with queue information:
+
{
+ "queue_size": 0,
+ "max_queue_size": 100
+}
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/index_zh.html b/static/index_zh.html
similarity index 93%
rename from index_zh.html
rename to static/index_zh.html
index d2f0c43..347744d 100644
--- a/index_zh.html
+++ b/static/index_zh.html
@@ -3,7 +3,7 @@
- OpenAI TTS API 文档
+ ttsfm
@@ -29,6 +29,9 @@ OpenAI TTS API 文档
API 域名: ttsapi.site
+
+ 版本: 1.1.2
+
English
中文
@@ -36,6 +39,19 @@
OpenAI TTS API 文档
+
+
+
服务状态
@@ -76,11 +92,16 @@ 立即体验
语音
Alloy
+ Ash
+ Ballad
+ Coral
Echo
Fable
Onyx
Nova
+ Sage
Shimmer
+ Verse
diff --git a/script.js b/static/script.js
similarity index 100%
rename from script.js
rename to static/script.js
diff --git a/styles.css b/static/styles.css
similarity index 92%
rename from styles.css
rename to static/styles.css
index f76a1a1..f26c54b 100644
--- a/styles.css
+++ b/static/styles.css
@@ -1,1100 +1,1183 @@
-:root {
- --primary-color: #2563eb;
- --secondary-color: #1e40af;
- --background-color: #0f172a;
- --text-color: #e2e8f0;
- --border-color: #1e293b;
- --success-color: #10b981;
- --error-color: #ef4444;
- --card-bg: #1e293b;
- --card-hover: #334155;
- --code-bg: #1e293b;
- --header-bg: #0f172a;
- --panel-bg: #ffffff;
- --docs-bg: #1a1a1a;
- --docs-text: #e5e7eb;
- --gradient-start: #60a5fa;
- --gradient-end: #34d399;
-}
-
-* {
- box-sizing: border-box;
- margin: 0;
- padding: 0;
-}
-
-body {
- font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
- line-height: 1.6;
- margin: 0;
- padding: 0;
- color: #333;
- background-color: #f5f5f5;
-}
-
-.app-container {
- max-width: 1200px;
- margin: 0 auto;
- padding: 2rem;
-}
-
-.content-wrapper {
- background-color: #fff;
- border-radius: 8px;
- box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
- padding: 2rem;
-}
-
-/* Header */
-.main-header {
- margin-bottom: 3rem;
- padding-bottom: 1rem;
- border-bottom: 1px solid #eee;
-}
-
-.header-top {
- display: flex;
- justify-content: space-between;
- align-items: center;
- margin-bottom: 0.5rem;
-}
-
-.github-link {
- color: #333;
- font-size: 1.5rem;
- transition: all 0.2s ease;
- background-color: #f8fafc;
- padding: 0.5rem 1rem;
- border-radius: 6px;
- border: 1px solid #e2e8f0;
- display: flex;
- align-items: center;
- gap: 0.5rem;
- text-decoration: none;
-}
-
-.github-link i {
- font-size: 1.5rem;
-}
-
-.github-link:hover {
- color: #2563eb;
- background-color: #eff6ff;
- border-color: #2563eb;
- transform: translateY(-2px);
- box-shadow: 0 4px 6px rgba(37, 99, 235, 0.1);
-}
-
-.header-bottom {
- display: flex;
- justify-content: space-between;
- align-items: center;
- margin-top: 1rem;
-}
-
-.language-selector {
- display: flex;
- gap: 0.5rem;
-}
-
-.lang-btn {
- padding: 0.5rem 1rem;
- border: 1px solid #e2e8f0;
- border-radius: 4px;
- background: white;
- cursor: pointer;
- font-size: 0.9rem;
- transition: all 0.2s ease;
-}
-
-.lang-btn:hover {
- border-color: #2563eb;
- color: #2563eb;
-}
-
-.lang-btn.active {
- background: #2563eb;
- color: white;
- border-color: #2563eb;
-}
-
-.main-header h1 {
- margin: 0;
- color: #2c3e50;
- font-size: 2.5rem;
-}
-
-.subtitle {
- color: #666;
- margin: 0.5rem 0 0;
-}
-
-/* Content sections */
-.content-section {
- margin-bottom: 3rem;
-}
-
-.content-section h2 {
- color: #2c3e50;
- margin-bottom: 1.5rem;
- font-size: 1.8rem;
-}
-
-/* Code blocks */
-.code-block {
- background: #1e1e1e;
- border-radius: 6px;
- overflow: hidden;
- margin: 1rem 0;
-}
-
-.code-header {
- display: flex;
- justify-content: space-between;
- align-items: center;
- padding: 0.5rem 1rem;
- background: #2d2d2d;
- border-bottom: 1px solid #3d3d3d;
-}
-
-.code-language {
- color: #fff;
- font-size: 0.9rem;
-}
-
-.copy-button {
- background: none;
- border: none;
- color: #fff;
- cursor: pointer;
- padding: 0.25rem 0.5rem;
- font-size: 0.9rem;
-}
-
-.copy-button:hover {
- color: #4CAF50;
-}
-
-/* Voice table */
-.voice-table {
- width: 100%;
- border-collapse: collapse;
- margin: 1rem 0;
-}
-
-.voice-table th,
-.voice-table td {
- padding: 0.75rem;
- text-align: left;
- border-bottom: 1px solid #eee;
-}
-
-.voice-table th {
- background-color: #f8f9fa;
- font-weight: 600;
- color: #2c3e50;
-}
-
-.voice-table tr:hover {
- background-color: #f8f9fa;
-}
-
-/* API endpoint */
-.api-endpoint {
- margin-bottom: 2rem;
-}
-
-.api-endpoint h3 {
- color: #2c3e50;
- margin-bottom: 1rem;
-}
-
-.api-endpoint h4 {
- color: #666;
- margin: 1.5rem 0 1rem;
-}
-
-/* Code syntax highlighting overrides */
-pre[class*="language-"] {
- margin: 0;
- border-radius: 0;
-}
-
-code[class*="language-"] {
- font-size: 0.9rem;
- padding: 1rem;
-}
-
-/* Status Section */
-.status-section {
- margin-bottom: 2rem;
-}
-
-.status-container {
- display: flex;
- justify-content: center;
- width: 100%;
-}
-
-.status-card {
- background-color: #fff;
- border-radius: 8px;
- box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
- padding: 1.5rem;
- width: 100%;
- max-width: 600px;
-}
-
-.status-header {
- display: flex;
- justify-content: space-between;
- align-items: center;
- margin-bottom: 1rem;
-}
-
-.status-header h3 {
- margin: 0;
- color: #2c3e50;
- font-size: 1.3rem;
-}
-
-.status-indicator {
- width: 12px;
- height: 12px;
- border-radius: 50%;
- background-color: #10b981; /* Default green */
-}
-
-.queue-stats {
- display: flex;
- justify-content: space-between;
- margin-bottom: 1rem;
-}
-
-.stat-item {
- display: flex;
- flex-direction: column;
-}
-
-.stat-label {
- font-size: 0.9rem;
- color: #64748b;
- margin-bottom: 0.25rem;
-}
-
-.stat-value {
- font-size: 1.5rem;
- font-weight: 600;
- color: #2c3e50;
-}
-
-.queue-progress-container {
- height: 8px;
- background-color: #e2e8f0;
- border-radius: 4px;
- overflow: hidden;
- margin-bottom: 0.75rem;
-}
-
-.queue-progress-bar {
- height: 100%;
- width: 0%;
- background: linear-gradient(to right, #10b981, #3b82f6);
- transition: width 0.5s ease, background-color 0.5s ease;
-}
-
-.queue-load-text {
- text-align: center;
- font-size: 0.9rem;
- font-weight: 500;
- color: #10b981; /* Default green */
-}
-
-/* Load status colors */
-.low-load {
- color: #10b981 !important; /* Green */
-}
-
-.medium-load {
- color: #f59e0b !important; /* Yellow/Orange */
-}
-
-.high-load {
- color: #ef4444 !important; /* Red */
-}
-
-.indicator-low {
- background-color: #10b981 !important;
-}
-
-.indicator-medium {
- background-color: #f59e0b !important;
-}
-
-.indicator-high {
- background-color: #ef4444 !important;
-}
-
-.progress-low {
- background: linear-gradient(to right, #10b981, #34d399) !important;
-}
-
-.progress-medium {
- background: linear-gradient(to right, #f59e0b, #fbbf24) !important;
-}
-
-.progress-high {
- background: linear-gradient(to right, #ef4444, #f87171) !important;
-}
-
-/* Voice Grid */
-.voice-grid {
- display: grid;
- grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
- gap: 1rem;
- margin-bottom: 1.5rem;
-}
-
-.voice-card {
- background: var(--card-bg);
- border: 1px solid var(--border-color);
- border-radius: 12px;
- padding: 1.25rem;
- transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
- position: relative;
- overflow: hidden;
-}
-
-.voice-card::after {
- content: '';
- position: absolute;
- top: 0;
- left: 0;
- width: 100%;
- height: 100%;
- background: linear-gradient(45deg, transparent, rgba(255, 255, 255, 0.05), transparent);
- transform: translateX(-100%);
- transition: transform 0.6s ease;
-}
-
-.voice-card:hover::after {
- transform: translateX(100%);
-}
-
-.voice-card:hover {
- transform: translateY(-2px);
- box-shadow: 0 8px 24px rgba(0, 0, 0, 0.2);
- border-color: var(--primary-color);
-}
-
-.voice-name {
- font-size: 1.1rem;
- font-weight: 600;
- color: var(--text-color);
- margin-bottom: 0.25rem;
- text-transform: capitalize;
- position: relative;
- display: inline-block;
-}
-
-.voice-name::after {
- content: '';
- position: absolute;
- bottom: -2px;
- left: 0;
- width: 0;
- height: 2px;
- background: linear-gradient(to right, var(--gradient-start), var(--gradient-end));
- transition: width 0.3s ease;
-}
-
-.voice-card:hover .voice-name::after {
- width: 100%;
-}
-
-.voice-description {
- font-size: 0.8rem;
- color: #94a3b8;
-}
-
-/* Processing Status */
-#processing-status {
- font-weight: 500;
-}
-
-#processing-status.processing {
- color: #60a5fa;
-}
-
-#processing-status.idle {
- color: #34d399;
-}
-
-/* Responsive Design */
-@media (max-width: 768px) {
- .app-container {
- padding: 1rem;
- }
-
- .main-header {
- padding: 2rem 1rem;
- }
-
- .main-header h1 {
- font-size: 2rem;
- }
-
- .queue-status {
- grid-template-columns: 1fr;
- }
-
- .voice-grid {
- grid-template-columns: 1fr;
- }
-
- .content-section {
- margin-bottom: 2rem;
- }
-
- .status-card, .voice-card {
- padding: 1.25rem;
- }
-
- .status-icon {
- width: 2.5rem;
- height: 2.5rem;
- font-size: 1.25rem;
- }
-
- .status-value {
- font-size: 1.25rem;
- }
-}
-
-@media (max-width: 480px) {
- .main-header h1 {
- font-size: 1.75rem;
- }
-
- .subtitle {
- font-size: 1rem;
- }
-
- .content-section h2 {
- font-size: 1.5rem;
- }
-
- .api-endpoint {
- padding: 1rem;
- }
-
- pre {
- padding: 1rem;
- }
-}
-
-/* Scrollbar Styling */
-::-webkit-scrollbar {
- width: 8px;
- height: 8px;
-}
-
-::-webkit-scrollbar-track {
- background: var(--background-color);
-}
-
-::-webkit-scrollbar-thumb {
- background: var(--border-color);
- border-radius: 4px;
-}
-
-::-webkit-scrollbar-thumb:hover {
- background: var(--card-hover);
-}
-
-/* Audio Container */
-.audio-container {
- margin-top: 20px;
- display: flex;
- flex-direction: column;
- gap: 15px;
-}
-
-.audio-wrapper {
- background: #2a2a2a;
- border-radius: 8px;
- padding: 15px;
- box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
-}
-
-.voice-label {
- font-size: 14px;
- color: #888;
- margin-bottom: 8px;
- text-transform: capitalize;
-}
-
-.audio-player {
- width: 100%;
- height: 40px;
- background: #1a1a1a;
- border-radius: 4px;
- padding: 5px;
-}
-
-.audio-player::-webkit-media-controls-panel {
- background: #1a1a1a;
-}
-
-.audio-player::-webkit-media-controls-play-button {
- background-color: #4CAF50;
- border-radius: 50%;
-}
-
-.audio-player::-webkit-media-controls-timeline {
- background-color: #4CAF50;
- border-radius: 2px;
-}
-
-.audio-player::-webkit-media-controls-volume-slider {
- background-color: #4CAF50;
- border-radius: 2px;
-}
-
-/* Header Decoration */
-.header-decoration {
- position: absolute;
- top: 0;
- right: 0;
- width: 200px;
- height: 200px;
- pointer-events: none;
-}
-
-.decoration-circle {
- position: absolute;
- border-radius: 50%;
- opacity: 0.1;
-}
-
-.decoration-circle:nth-child(1) {
- width: 100px;
- height: 100px;
- background: var(--gradient-start);
- top: 20px;
- right: 20px;
-}
-
-.decoration-circle:nth-child(2) {
- width: 60px;
- height: 60px;
- background: var(--gradient-end);
- top: 60px;
- right: 60px;
-}
-
-.decoration-circle:nth-child(3) {
- width: 40px;
- height: 40px;
- background: var(--primary-color);
- top: 100px;
- right: 100px;
-}
-
-/* Voice Icon */
-.voice-icon {
- width: 40px;
- height: 40px;
- background: rgba(255, 255, 255, 0.1);
- border-radius: 50%;
- display: flex;
- align-items: center;
- justify-content: center;
- margin-bottom: 1rem;
- transition: all 0.3s ease;
-}
-
-.voice-icon i {
- font-size: 1.25rem;
- color: var(--text-color);
-}
-
-.voice-card:hover .voice-icon {
- background: var(--primary-color);
- transform: scale(1.1);
-}
-
-/* Section Headers */
-.content-section h2 {
- display: flex;
- align-items: center;
- gap: 0.75rem;
- margin-bottom: 1.5rem;
-}
-
-.content-section h2 i {
- color: var(--primary-color);
- font-size: 1.5rem;
-}
-
-.api-endpoint h3 {
- display: flex;
- align-items: center;
- gap: 0.75rem;
-}
-
-.api-endpoint h3 i {
- color: var(--primary-color);
- font-size: 1.25rem;
-}
-
-/* Loading Animation */
-@keyframes pulse {
- 0% { transform: scale(1); }
- 50% { transform: scale(1.05); }
- 100% { transform: scale(1); }
-}
-
-.status-card.processing {
- animation: pulse 2s infinite;
-}
-
-.status-card.processing .status-icon {
- animation: spin 2s linear infinite;
-}
-
-@keyframes spin {
- from { transform: rotate(0deg); }
- to { transform: rotate(360deg); }
-}
-
-/* Parameter and Error Tables */
-.params-table,
-.error-table {
- width: 100%;
- border-collapse: collapse;
- margin: 1rem 0;
- background-color: #fff;
- border: 1px solid #eee;
- border-radius: 6px;
- overflow: hidden;
-}
-
-.params-table th,
-.params-table td,
-.error-table th,
-.error-table td {
- padding: 0.75rem;
- text-align: left;
- border-bottom: 1px solid #eee;
-}
-
-.params-table th,
-.error-table th {
- background-color: #f8f9fa;
- font-weight: 600;
- color: #2c3e50;
- text-transform: uppercase;
- font-size: 0.85rem;
-}
-
-.params-table td:nth-child(2),
-.params-table td:nth-child(3) {
- font-family: 'Fira Code', monospace;
- font-size: 0.9rem;
-}
-
-.params-table td:nth-child(3) {
- color: #2563eb;
- font-weight: 500;
-}
-
-.error-table td:first-child {
- font-family: 'Fira Code', monospace;
- font-weight: 500;
- color: #ef4444;
-}
-
-/* Response Format Section */
-.api-endpoint ul {
- list-style: none;
- padding-left: 0;
- margin: 1rem 0;
-}
-
-.api-endpoint ul li {
- margin: 0.5rem 0;
- padding-left: 1.5rem;
- position: relative;
-}
-
-.api-endpoint ul li::before {
- content: '•';
- position: absolute;
- left: 0.5rem;
- color: #2563eb;
-}
-
-.api-endpoint ul li code {
- background-color: #f1f5f9;
- padding: 0.2rem 0.4rem;
- border-radius: 4px;
- font-family: 'Fira Code', monospace;
- font-size: 0.9rem;
- color: #2563eb;
-}
-
-/* Queue System Section */
-.api-endpoint p {
- margin: 1rem 0;
- line-height: 1.6;
- color: #4a5568;
-}
-
-/* Language Tabs */
-.language-tabs {
- display: flex;
- gap: 1rem;
- margin-bottom: 1rem;
-}
-
-.language-tab {
- padding: 0.5rem 1rem;
- background-color: #f1f5f9;
- border-radius: 4px;
- cursor: pointer;
- font-weight: 500;
- color: #4a5568;
- transition: all 0.2s ease;
-}
-
-.language-tab:hover,
-.language-tab.active {
- background-color: #2563eb;
- color: #fff;
-}
-
-/* Best Used For Column */
-.voice-table td:last-child {
- color: #4a5568;
- font-style: italic;
-}
-
-/* Mobile Responsiveness for New Elements */
-@media (max-width: 768px) {
- .params-table,
- .error-table {
- display: block;
- overflow-x: auto;
- -webkit-overflow-scrolling: touch;
- }
-
- .params-table th,
- .params-table td,
- .error-table th,
- .error-table td {
- min-width: 120px;
- }
-}
-
-/* Compatibility Parameters */
-.compat-param {
- color: #94a3b8 !important;
- font-style: italic;
-}
-
-/* Partially Supported Parameters */
-.partial-param {
- color: #3b82f6 !important;
- font-style: italic;
-}
-
-.compatibility-notice {
- margin: 1rem 0;
- padding: 1rem;
- background-color: #f8fafc;
- border-left: 4px solid #94a3b8;
- border-radius: 0 4px 4px 0;
-}
-
-.compatibility-notice p {
- margin: 0;
- color: #64748b;
- font-size: 0.9rem;
-}
-
-.compatibility-notice strong {
- color: #475569;
-}
-
-.compat-inline {
- color: #94a3b8;
- font-style: italic;
- padding: 0 2px;
-}
-
-.partial-inline {
- color: #3b82f6;
- font-style: italic;
- padding: 0 2px;
-}
-
-/* Update code examples to reflect actual usage */
-.code-block pre code {
- line-height: 1.5;
-}
-
-/* Parameter Details Section */
-.parameter-details {
- margin: 1.5rem 0;
- padding: 1rem;
- background-color: #f0f9ff;
- border-radius: 6px;
- border: 1px solid #e0f2fe;
-}
-
-.parameter-details h4 {
- color: #0369a1;
- margin-top: 0;
- margin-bottom: 1rem;
-}
-
-.parameter-details p {
- margin-bottom: 1rem;
- color: #334155;
-}
-
-.parameter-details code {
- background-color: #e0f2fe;
- padding: 0.2rem 0.4rem;
- border-radius: 4px;
- font-family: 'Fira Code', monospace;
- font-size: 0.9rem;
- color: #0369a1;
-}
-
-/* Examples List */
-.examples-list {
- list-style: none;
- padding-left: 0;
- margin: 1rem 0;
-}
-
-.examples-list li {
- margin: 0.75rem 0;
- padding: 0.5rem 0.75rem;
- background-color: #fff;
- border-left: 3px solid #3b82f6;
- border-radius: 0 4px 4px 0;
-}
-
-.examples-list li strong {
- color: #1e40af;
- margin-right: 0.25rem;
-}
-
-/* Tip Box */
-.tip-box {
- margin-top: 1.5rem;
- padding: 0.75rem 1rem;
- background-color: #fffbeb;
- border-left: 3px solid #f59e0b;
- border-radius: 0 4px 4px 0;
-}
-
-.tip-box p {
- margin: 0;
- color: #92400e;
- font-size: 0.9rem;
-}
-
-.tip-box strong {
- color: #78350f;
-}
-
-/* Warning Box */
-.warning-box {
- margin: 1rem 0;
- padding: 0.75rem 1rem;
- background-color: #fef2f2;
- border-left: 3px solid #ef4444;
- border-radius: 0 4px 4px 0;
-}
-
-.warning-box p {
- margin: 0;
- color: #b91c1c;
- font-size: 0.9rem;
-}
-
-.warning-box strong {
- color: #991b1b;
-}
-
-/* Voice List */
-.voice-list {
- display: flex;
- flex-wrap: wrap;
- gap: 1rem;
- margin: 1rem 0;
-}
-
-.voice-list .voice-name {
- background-color: #f1f5f9;
- border-radius: 4px;
- padding: 0.5rem 1rem;
- font-family: 'Fira Code', monospace;
- font-size: 0.9rem;
- color: #2563eb;
- border: 1px solid #e2e8f0;
- transition: all 0.2s ease;
- cursor: default;
-}
-
-.voice-list .voice-name:hover {
- background-color: #e0f2fe;
- transform: translateY(-2px);
- box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
-}
-
-/* Domain Badge */
-.domain-badge {
- margin-top: 1rem;
- display: inline-block;
- background-color: #f1f5f9;
- border-radius: 4px;
- padding: 0.5rem 1rem;
- border-left: 3px solid #2563eb;
-}
-
-.domain-badge span {
- font-size: 0.9rem;
- color: #64748b;
-}
-
-.domain-badge strong {
- color: #2563eb;
- font-family: 'Fira Code', monospace;
- letter-spacing: 0.5px;
-}
-
-/* Playground Section */
-.playground-section {
- background-color: #fff;
- border-radius: 8px;
- padding: 2rem;
- margin-bottom: 2rem;
- box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
-}
-
-.playground-container {
- display: grid;
- grid-template-columns: 1fr 1fr;
- gap: 2rem;
- margin-top: 1.5rem;
-}
-
-.playground-form {
- display: flex;
- flex-direction: column;
- gap: 1.5rem;
-}
-
-.form-group {
- display: flex;
- flex-direction: column;
- gap: 0.5rem;
-}
-
-.form-group label {
- font-weight: 500;
- color: #2c3e50;
-}
-
-.form-group textarea,
-.form-group select {
- padding: 0.75rem;
- border: 1px solid #e2e8f0;
- border-radius: 6px;
- font-size: 1rem;
- font-family: inherit;
- resize: vertical;
- transition: border-color 0.2s ease;
-}
-
-.form-group textarea:focus,
-.form-group select:focus {
- outline: none;
- border-color: #2563eb;
- box-shadow: 0 0 0 3px rgba(37, 99, 235, 0.1);
-}
-
-.playground-button {
- background-color: #2563eb;
- color: white;
- border: none;
- padding: 0.75rem 1.5rem;
- border-radius: 6px;
- font-size: 1rem;
- font-weight: 500;
- cursor: pointer;
- display: flex;
- align-items: center;
- justify-content: center;
- gap: 0.5rem;
- transition: background-color 0.2s ease;
-}
-
-.playground-button:hover {
- background-color: #1d4ed8;
-}
-
-.playground-button:disabled {
- background-color: #94a3b8;
- cursor: not-allowed;
-}
-
-.playground-output {
- display: flex;
- flex-direction: column;
- gap: 1rem;
-}
-
-.playground-status {
- padding: 1rem;
- border-radius: 6px;
- font-size: 0.9rem;
-}
-
-.playground-status.error {
- background-color: #fef2f2;
- color: #b91c1c;
- border: 1px solid #fee2e2;
-}
-
-.playground-status.success {
- background-color: #f0fdf4;
- color: #166534;
- border: 1px solid #dcfce7;
-}
-
-.playground-audio {
- background-color: #f8fafc;
- border-radius: 6px;
- padding: 1rem;
- min-height: 100px;
- display: flex;
- align-items: center;
- justify-content: center;
-}
-
-.playground-audio audio {
- width: 100%;
- max-width: 400px;
-}
-
-@media (max-width: 768px) {
- .playground-container {
- grid-template-columns: 1fr;
- }
+:root {
+ --primary-color: #2563eb;
+ --secondary-color: #1e40af;
+ --background-color: #0f172a;
+ --text-color: #e2e8f0;
+ --border-color: #1e293b;
+ --success-color: #10b981;
+ --error-color: #ef4444;
+ --card-bg: #1e293b;
+ --card-hover: #334155;
+ --code-bg: #1e293b;
+ --header-bg: #0f172a;
+ --panel-bg: #ffffff;
+ --docs-bg: #1a1a1a;
+ --docs-text: #e5e7eb;
+ --gradient-start: #60a5fa;
+ --gradient-end: #34d399;
+ --disclaimer-bg: #eef2ff;
+ --disclaimer-border: #c7d2fe;
+ --disclaimer-text: #4f46e5;
+}
+
+* {
+ box-sizing: border-box;
+ margin: 0;
+ padding: 0;
+}
+
+body {
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
+ line-height: 1.6;
+ margin: 0;
+ padding: 0;
+ color: #333;
+ background-color: #f5f5f5;
+}
+
+.app-container {
+ max-width: 1200px;
+ margin: 0 auto;
+ padding: 2rem;
+}
+
+.content-wrapper {
+ background-color: #fff;
+ border-radius: 8px;
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+ padding: 2rem;
+}
+
+/* Header */
+.main-header {
+ margin-bottom: 3rem;
+ padding-bottom: 1rem;
+ border-bottom: 1px solid #eee;
+}
+
+.header-top {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ margin-bottom: 0.5rem;
+}
+
+.github-link {
+ color: #333;
+ font-size: 1.5rem;
+ transition: all 0.2s ease;
+ background-color: #f8fafc;
+ padding: 0.5rem 1rem;
+ border-radius: 6px;
+ border: 1px solid #e2e8f0;
+ display: flex;
+ align-items: center;
+ gap: 0.5rem;
+ text-decoration: none;
+}
+
+.github-link i {
+ font-size: 1.5rem;
+}
+
+.github-link:hover {
+ color: #2563eb;
+ background-color: #eff6ff;
+ border-color: #2563eb;
+ transform: translateY(-2px);
+ box-shadow: 0 4px 6px rgba(37, 99, 235, 0.1);
+}
+
+.header-bottom {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ margin-top: 1rem;
+}
+
+.language-selector {
+ display: flex;
+ gap: 0.5rem;
+}
+
+.lang-btn {
+ padding: 0.5rem 1rem;
+ border: 1px solid #e2e8f0;
+ border-radius: 4px;
+ background: white;
+ cursor: pointer;
+ font-size: 0.9rem;
+ transition: all 0.2s ease;
+}
+
+.lang-btn:hover {
+ border-color: #2563eb;
+ color: #2563eb;
+}
+
+.lang-btn.active {
+ background: #2563eb;
+ color: white;
+ border-color: #2563eb;
+}
+
+.main-header h1 {
+ margin: 0;
+ color: #2c3e50;
+ font-size: 2.5rem;
+}
+
+.subtitle {
+ color: #666;
+ margin: 0.5rem 0 0;
+}
+
+/* Disclaimer Section */
+.disclaimer-notice {
+ background-color: var(--disclaimer-bg);
+ border: 1px solid var(--disclaimer-border);
+ border-radius: 8px;
+ margin-bottom: 2rem;
+ padding: 1.5rem;
+}
+
+.disclaimer-container {
+ display: flex;
+ align-items: flex-start;
+ gap: 1rem;
+}
+
+.disclaimer-icon {
+ color: var(--disclaimer-text);
+ font-size: 1.5rem;
+ padding: 0.5rem;
+ background-color: rgba(79, 70, 229, 0.1);
+ border-radius: 50%;
+}
+
+.disclaimer-content {
+ flex: 1;
+}
+
+.disclaimer-content h2 {
+ color: var(--disclaimer-text);
+ margin-bottom: 0.5rem;
+ font-size: 1.5rem;
+}
+
+.disclaimer-content p {
+ color: #4b5563;
+ margin-bottom: 0;
+}
+
+.disclaimer-content a {
+ color: var(--disclaimer-text);
+ text-decoration: none;
+ font-weight: 500;
+}
+
+.disclaimer-content a:hover {
+ text-decoration: underline;
+}
+
+/* Content sections */
+.content-section {
+ margin-bottom: 3rem;
+}
+
+.content-section h2 {
+ color: #2c3e50;
+ margin-bottom: 1.5rem;
+ font-size: 1.8rem;
+}
+
+/* Code blocks */
+.code-block {
+ background: #1e1e1e;
+ border-radius: 6px;
+ overflow: hidden;
+ margin: 1rem 0;
+}
+
+.code-header {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: 0.5rem 1rem;
+ background: #2d2d2d;
+ border-bottom: 1px solid #3d3d3d;
+}
+
+.code-language {
+ color: #fff;
+ font-size: 0.9rem;
+}
+
+.copy-button {
+ background: none;
+ border: none;
+ color: #fff;
+ cursor: pointer;
+ padding: 0.25rem 0.5rem;
+ font-size: 0.9rem;
+}
+
+.copy-button:hover {
+ color: #4CAF50;
+}
+
+/* Voice table */
+.voice-table {
+ width: 100%;
+ border-collapse: collapse;
+ margin: 1rem 0;
+}
+
+.voice-table th,
+.voice-table td {
+ padding: 0.75rem;
+ text-align: left;
+ border-bottom: 1px solid #eee;
+}
+
+.voice-table th {
+ background-color: #f8f9fa;
+ font-weight: 600;
+ color: #2c3e50;
+}
+
+.voice-table tr:hover {
+ background-color: #f8f9fa;
+}
+
+/* API endpoint */
+.api-endpoint {
+ margin-bottom: 2rem;
+}
+
+.api-endpoint h3 {
+ color: #2c3e50;
+ margin-bottom: 1rem;
+}
+
+.api-endpoint h4 {
+ color: #666;
+ margin: 1.5rem 0 1rem;
+}
+
+/* Code syntax highlighting overrides */
+pre[class*="language-"] {
+ margin: 0;
+ border-radius: 0;
+}
+
+code[class*="language-"] {
+ font-size: 0.9rem;
+ padding: 1rem;
+}
+
+/* Status Section */
+.status-section {
+ margin-bottom: 2rem;
+}
+
+.status-container {
+ display: flex;
+ justify-content: center;
+ width: 100%;
+}
+
+.status-card {
+ background-color: #fff;
+ border-radius: 8px;
+ box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+ padding: 1.5rem;
+ width: 100%;
+ max-width: 600px;
+}
+
+.status-header {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ margin-bottom: 1rem;
+}
+
+.status-header h3 {
+ margin: 0;
+ color: #2c3e50;
+ font-size: 1.3rem;
+}
+
+.status-indicator {
+ width: 12px;
+ height: 12px;
+ border-radius: 50%;
+ background-color: #10b981; /* Default green */
+}
+
+.queue-stats {
+ display: flex;
+ justify-content: space-between;
+ margin-bottom: 1rem;
+}
+
+.stat-item {
+ display: flex;
+ flex-direction: column;
+}
+
+.stat-label {
+ font-size: 0.9rem;
+ color: #64748b;
+ margin-bottom: 0.25rem;
+}
+
+.stat-value {
+ font-size: 1.5rem;
+ font-weight: 600;
+ color: #2c3e50;
+}
+
+.queue-progress-container {
+ height: 8px;
+ background-color: #e2e8f0;
+ border-radius: 4px;
+ overflow: hidden;
+ margin-bottom: 0.75rem;
+}
+
+.queue-progress-bar {
+ height: 100%;
+ width: 0%;
+ background: linear-gradient(to right, #10b981, #3b82f6);
+ transition: width 0.5s ease, background-color 0.5s ease;
+}
+
+.queue-load-text {
+ text-align: center;
+ font-size: 0.9rem;
+ font-weight: 500;
+ color: #10b981; /* Default green */
+}
+
+/* Load status colors */
+.low-load {
+ color: #10b981 !important; /* Green */
+}
+
+.medium-load {
+ color: #f59e0b !important; /* Yellow/Orange */
+}
+
+.high-load {
+ color: #ef4444 !important; /* Red */
+}
+
+.indicator-low {
+ background-color: #10b981 !important;
+}
+
+.indicator-medium {
+ background-color: #f59e0b !important;
+}
+
+.indicator-high {
+ background-color: #ef4444 !important;
+}
+
+.progress-low {
+ background: linear-gradient(to right, #10b981, #34d399) !important;
+}
+
+.progress-medium {
+ background: linear-gradient(to right, #f59e0b, #fbbf24) !important;
+}
+
+.progress-high {
+ background: linear-gradient(to right, #ef4444, #f87171) !important;
+}
+
+/* Voice Grid */
+.voice-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
+ gap: 1rem;
+ margin-bottom: 1.5rem;
+}
+
+.voice-card {
+ background: var(--card-bg);
+ border: 1px solid var(--border-color);
+ border-radius: 12px;
+ padding: 1.25rem;
+ transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+ position: relative;
+ overflow: hidden;
+}
+
+.voice-card::after {
+ content: '';
+ position: absolute;
+ top: 0;
+ left: 0;
+ width: 100%;
+ height: 100%;
+ background: linear-gradient(45deg, transparent, rgba(255, 255, 255, 0.05), transparent);
+ transform: translateX(-100%);
+ transition: transform 0.6s ease;
+}
+
+.voice-card:hover::after {
+ transform: translateX(100%);
+}
+
+.voice-card:hover {
+ transform: translateY(-2px);
+ box-shadow: 0 8px 24px rgba(0, 0, 0, 0.2);
+ border-color: var(--primary-color);
+}
+
+.voice-name {
+ font-size: 1.1rem;
+ font-weight: 600;
+ color: var(--text-color);
+ margin-bottom: 0.25rem;
+ text-transform: capitalize;
+ position: relative;
+ display: inline-block;
+}
+
+.voice-name::after {
+ content: '';
+ position: absolute;
+ bottom: -2px;
+ left: 0;
+ width: 0;
+ height: 2px;
+ background: linear-gradient(to right, var(--gradient-start), var(--gradient-end));
+ transition: width 0.3s ease;
+}
+
+.voice-card:hover .voice-name::after {
+ width: 100%;
+}
+
+.voice-description {
+ font-size: 0.8rem;
+ color: #94a3b8;
+}
+
+/* Processing Status */
+#processing-status {
+ font-weight: 500;
+}
+
+#processing-status.processing {
+ color: #60a5fa;
+}
+
+#processing-status.idle {
+ color: #34d399;
+}
+
+/* Responsive Design */
+@media (max-width: 768px) {
+ .app-container {
+ padding: 1rem;
+ }
+
+ .main-header {
+ padding: 2rem 1rem;
+ }
+
+ .main-header h1 {
+ font-size: 2rem;
+ }
+
+ .queue-status {
+ grid-template-columns: 1fr;
+ }
+
+ .voice-grid {
+ grid-template-columns: 1fr;
+ }
+
+ .content-section {
+ margin-bottom: 2rem;
+ }
+
+ .status-card, .voice-card {
+ padding: 1.25rem;
+ }
+
+ .status-icon {
+ width: 2.5rem;
+ height: 2.5rem;
+ font-size: 1.25rem;
+ }
+
+ .status-value {
+ font-size: 1.25rem;
+ }
+}
+
+@media (max-width: 480px) {
+ .main-header h1 {
+ font-size: 1.75rem;
+ }
+
+ .subtitle {
+ font-size: 1rem;
+ }
+
+ .content-section h2 {
+ font-size: 1.5rem;
+ }
+
+ .api-endpoint {
+ padding: 1rem;
+ }
+
+ pre {
+ padding: 1rem;
+ }
+}
+
+/* Scrollbar Styling */
+::-webkit-scrollbar {
+ width: 8px;
+ height: 8px;
+}
+
+::-webkit-scrollbar-track {
+ background: var(--background-color);
+}
+
+::-webkit-scrollbar-thumb {
+ background: var(--border-color);
+ border-radius: 4px;
+}
+
+::-webkit-scrollbar-thumb:hover {
+ background: var(--card-hover);
+}
+
+/* Audio Container */
+.audio-container {
+ margin-top: 20px;
+ display: flex;
+ flex-direction: column;
+ gap: 15px;
+}
+
+.audio-wrapper {
+ background: #2a2a2a;
+ border-radius: 8px;
+ padding: 15px;
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+}
+
+.voice-label {
+ font-size: 14px;
+ color: #888;
+ margin-bottom: 8px;
+ text-transform: capitalize;
+}
+
+.audio-player {
+ width: 100%;
+ height: 40px;
+ background: #1a1a1a;
+ border-radius: 4px;
+ padding: 5px;
+}
+
+.audio-player::-webkit-media-controls-panel {
+ background: #1a1a1a;
+}
+
+.audio-player::-webkit-media-controls-play-button {
+ background-color: #4CAF50;
+ border-radius: 50%;
+}
+
+.audio-player::-webkit-media-controls-timeline {
+ background-color: #4CAF50;
+ border-radius: 2px;
+}
+
+.audio-player::-webkit-media-controls-volume-slider {
+ background-color: #4CAF50;
+ border-radius: 2px;
+}
+
+/* Header Decoration */
+.header-decoration {
+ position: absolute;
+ top: 0;
+ right: 0;
+ width: 200px;
+ height: 200px;
+ pointer-events: none;
+}
+
+.decoration-circle {
+ position: absolute;
+ border-radius: 50%;
+ opacity: 0.1;
+}
+
+.decoration-circle:nth-child(1) {
+ width: 100px;
+ height: 100px;
+ background: var(--gradient-start);
+ top: 20px;
+ right: 20px;
+}
+
+.decoration-circle:nth-child(2) {
+ width: 60px;
+ height: 60px;
+ background: var(--gradient-end);
+ top: 60px;
+ right: 60px;
+}
+
+.decoration-circle:nth-child(3) {
+ width: 40px;
+ height: 40px;
+ background: var(--primary-color);
+ top: 100px;
+ right: 100px;
+}
+
+/* Voice Icon */
+.voice-icon {
+ width: 40px;
+ height: 40px;
+ background: rgba(255, 255, 255, 0.1);
+ border-radius: 50%;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ margin-bottom: 1rem;
+ transition: all 0.3s ease;
+}
+
+.voice-icon i {
+ font-size: 1.25rem;
+ color: var(--text-color);
+}
+
+.voice-card:hover .voice-icon {
+ background: var(--primary-color);
+ transform: scale(1.1);
+}
+
+/* Section Headers */
+.content-section h2 {
+ display: flex;
+ align-items: center;
+ gap: 0.75rem;
+ margin-bottom: 1.5rem;
+}
+
+.content-section h2 i {
+ color: var(--primary-color);
+ font-size: 1.5rem;
+}
+
+.api-endpoint h3 {
+ display: flex;
+ align-items: center;
+ gap: 0.75rem;
+}
+
+.api-endpoint h3 i {
+ color: var(--primary-color);
+ font-size: 1.25rem;
+}
+
+/* Loading Animation */
+@keyframes pulse {
+ 0% { transform: scale(1); }
+ 50% { transform: scale(1.05); }
+ 100% { transform: scale(1); }
+}
+
+.status-card.processing {
+ animation: pulse 2s infinite;
+}
+
+.status-card.processing .status-icon {
+ animation: spin 2s linear infinite;
+}
+
+@keyframes spin {
+ from { transform: rotate(0deg); }
+ to { transform: rotate(360deg); }
+}
+
+/* Parameter and Error Tables */
+.params-table,
+.error-table {
+ width: 100%;
+ border-collapse: collapse;
+ margin: 1rem 0;
+ background-color: #fff;
+ border: 1px solid #eee;
+ border-radius: 6px;
+ overflow: hidden;
+}
+
+.params-table th,
+.params-table td,
+.error-table th,
+.error-table td {
+ padding: 0.75rem;
+ text-align: left;
+ border-bottom: 1px solid #eee;
+}
+
+.params-table th,
+.error-table th {
+ background-color: #f8f9fa;
+ font-weight: 600;
+ color: #2c3e50;
+ text-transform: uppercase;
+ font-size: 0.85rem;
+}
+
+.params-table td:nth-child(2),
+.params-table td:nth-child(3) {
+ font-family: 'Fira Code', monospace;
+ font-size: 0.9rem;
+}
+
+.params-table td:nth-child(3) {
+ color: #2563eb;
+ font-weight: 500;
+}
+
+.error-table td:first-child {
+ font-family: 'Fira Code', monospace;
+ font-weight: 500;
+ color: #ef4444;
+}
+
+/* Response Format Section */
+.api-endpoint ul {
+ list-style: none;
+ padding-left: 0;
+ margin: 1rem 0;
+}
+
+.api-endpoint ul li {
+ margin: 0.5rem 0;
+ padding-left: 1.5rem;
+ position: relative;
+}
+
+.api-endpoint ul li::before {
+ content: '•';
+ position: absolute;
+ left: 0.5rem;
+ color: #2563eb;
+}
+
+.api-endpoint ul li code {
+ background-color: #f1f5f9;
+ padding: 0.2rem 0.4rem;
+ border-radius: 4px;
+ font-family: 'Fira Code', monospace;
+ font-size: 0.9rem;
+ color: #2563eb;
+}
+
+/* Queue System Section */
+.api-endpoint p {
+ margin: 1rem 0;
+ line-height: 1.6;
+ color: #4a5568;
+}
+
+/* Language Tabs */
+.language-tabs {
+ display: flex;
+ gap: 1rem;
+ margin-bottom: 1rem;
+}
+
+.language-tab {
+ padding: 0.5rem 1rem;
+ background-color: #f1f5f9;
+ border-radius: 4px;
+ cursor: pointer;
+ font-weight: 500;
+ color: #4a5568;
+ transition: all 0.2s ease;
+}
+
+.language-tab:hover,
+.language-tab.active {
+ background-color: #2563eb;
+ color: #fff;
+}
+
+/* Best Used For Column */
+.voice-table td:last-child {
+ color: #4a5568;
+ font-style: italic;
+}
+
+/* Mobile Responsiveness for New Elements */
+@media (max-width: 768px) {
+ .params-table,
+ .error-table {
+ display: block;
+ overflow-x: auto;
+ -webkit-overflow-scrolling: touch;
+ }
+
+ .params-table th,
+ .params-table td,
+ .error-table th,
+ .error-table td {
+ min-width: 120px;
+ }
+}
+
+/* Compatibility Parameters */
+.compat-param {
+ color: #94a3b8 !important;
+ font-style: italic;
+}
+
+/* Partially Supported Parameters */
+.partial-param {
+ color: #3b82f6 !important;
+ font-style: italic;
+}
+
+.compatibility-notice {
+ margin: 1rem 0;
+ padding: 1rem;
+ background-color: #f8fafc;
+ border-left: 4px solid #94a3b8;
+ border-radius: 0 4px 4px 0;
+}
+
+.compatibility-notice p {
+ margin: 0;
+ color: #64748b;
+ font-size: 0.9rem;
+}
+
+.compatibility-notice strong {
+ color: #475569;
+}
+
+.compat-inline {
+ color: #94a3b8;
+ font-style: italic;
+ padding: 0 2px;
+}
+
+.partial-inline {
+ color: #3b82f6;
+ font-style: italic;
+ padding: 0 2px;
+}
+
+/* Code example blocks */
+.code-block pre code {
+ line-height: 1.5;
+}
+
+/* Parameter Details Section */
+.parameter-details {
+ margin: 1.5rem 0;
+ padding: 1rem;
+ background-color: #f0f9ff;
+ border-radius: 6px;
+ border: 1px solid #e0f2fe;
+}
+
+.parameter-details h4 {
+ color: #0369a1;
+ margin-top: 0;
+ margin-bottom: 1rem;
+}
+
+.parameter-details p {
+ margin-bottom: 1rem;
+ color: #334155;
+}
+
+.parameter-details code {
+ background-color: #e0f2fe;
+ padding: 0.2rem 0.4rem;
+ border-radius: 4px;
+ font-family: 'Fira Code', monospace;
+ font-size: 0.9rem;
+ color: #0369a1;
+}
+
+/* Examples List */
+.examples-list {
+ list-style: none;
+ padding-left: 0;
+ margin: 1rem 0;
+}
+
+.examples-list li {
+ margin: 0.75rem 0;
+ padding: 0.5rem 0.75rem;
+ background-color: #fff;
+ border-left: 3px solid #3b82f6;
+ border-radius: 0 4px 4px 0;
+}
+
+.examples-list li strong {
+ color: #1e40af;
+ margin-right: 0.25rem;
+}
+
+/* Tip Box */
+.tip-box {
+ margin-top: 1.5rem;
+ padding: 0.75rem 1rem;
+ background-color: #fffbeb;
+ border-left: 3px solid #f59e0b;
+ border-radius: 0 4px 4px 0;
+}
+
+.tip-box p {
+ margin: 0;
+ color: #92400e;
+ font-size: 0.9rem;
+}
+
+.tip-box strong {
+ color: #78350f;
+}
+
+/* Warning Box */
+.warning-box {
+ margin: 1rem 0;
+ padding: 0.75rem 1rem;
+ background-color: #fef2f2;
+ border-left: 3px solid #ef4444;
+ border-radius: 0 4px 4px 0;
+}
+
+.warning-box p {
+ margin: 0;
+ color: #b91c1c;
+ font-size: 0.9rem;
+}
+
+.warning-box strong {
+ color: #991b1b;
+}
+
+/* Voice List */
+.voice-list {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 1rem;
+ margin: 1rem 0;
+}
+
+.voice-list .voice-name {
+ background-color: #f1f5f9;
+ border-radius: 4px;
+ padding: 0.5rem 1rem;
+ font-family: 'Fira Code', monospace;
+ font-size: 0.9rem;
+ color: #2563eb;
+ border: 1px solid #e2e8f0;
+ transition: all 0.2s ease;
+ cursor: default;
+}
+
+.voice-list .voice-name:hover {
+ background-color: #e0f2fe;
+ transform: translateY(-2px);
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
+}
+
+/* Domain Badge */
+.domain-badge {
+ margin-top: 1rem;
+ display: inline-block;
+ background-color: #f1f5f9;
+ border-radius: 4px;
+ padding: 0.5rem 1rem;
+ border-left: 3px solid #2563eb;
+}
+
+.domain-badge span {
+ font-size: 0.9rem;
+ color: #64748b;
+}
+
+.domain-badge strong {
+ color: var(--primary-color);
+ font-weight: 600;
+}
+
+.version-badge {
+ background-color: #fff;
+ border: 1px solid #e2e8f0;
+ border-radius: 6px;
+ padding: 8px 16px;
+ margin-left: 12px;
+ display: inline-flex;
+ align-items: center;
+ transition: all 0.2s ease;
+ box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
+}
+
+.version-badge:hover {
+ border-color: var(--primary-color);
+ transform: translateY(-1px);
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+}
+
+.version-badge span {
+ color: #64748b;
+ font-size: 0.9rem;
+ display: flex;
+ align-items: center;
+ gap: 4px;
+}
+
+.version-badge strong {
+ color: var(--primary-color);
+ font-weight: 600;
+ font-family: 'Fira Code', monospace;
+ letter-spacing: 0.5px;
+}
+
+/* Playground Section */
+.playground-section {
+ background-color: #fff;
+ border-radius: 8px;
+ padding: 2rem;
+ margin-bottom: 2rem;
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+}
+
+.playground-container {
+ display: grid;
+ grid-template-columns: 1fr 1fr;
+ gap: 2rem;
+ margin-top: 1.5rem;
+}
+
+.playground-form {
+ display: flex;
+ flex-direction: column;
+ gap: 1.5rem;
+}
+
+.form-group {
+ display: flex;
+ flex-direction: column;
+ gap: 0.5rem;
+}
+
+.form-group label {
+ font-weight: 500;
+ color: #2c3e50;
+}
+
+.form-group textarea,
+.form-group select {
+ padding: 0.75rem;
+ border: 1px solid #e2e8f0;
+ border-radius: 6px;
+ font-size: 1rem;
+ font-family: inherit;
+ resize: vertical;
+ transition: border-color 0.2s ease;
+}
+
+.form-group textarea:focus,
+.form-group select:focus {
+ outline: none;
+ border-color: #2563eb;
+ box-shadow: 0 0 0 3px rgba(37, 99, 235, 0.1);
+}
+
+.playground-button {
+ background-color: #2563eb;
+ color: white;
+ border: none;
+ padding: 0.75rem 1.5rem;
+ border-radius: 6px;
+ font-size: 1rem;
+ font-weight: 500;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 0.5rem;
+ transition: background-color 0.2s ease;
+}
+
+.playground-button:hover {
+ background-color: #1d4ed8;
+}
+
+.playground-button:disabled {
+ background-color: #94a3b8;
+ cursor: not-allowed;
+}
+
+.playground-output {
+ display: flex;
+ flex-direction: column;
+ gap: 1rem;
+}
+
+.playground-status {
+ padding: 1rem;
+ border-radius: 6px;
+ font-size: 0.9rem;
+}
+
+.playground-status.error {
+ background-color: #fef2f2;
+ color: #b91c1c;
+ border: 1px solid #fee2e2;
+}
+
+.playground-status.success {
+ background-color: #f0fdf4;
+ color: #166534;
+ border: 1px solid #dcfce7;
+}
+
+.playground-audio {
+ background-color: #f8fafc;
+ border-radius: 6px;
+ padding: 1rem;
+ min-height: 100px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+}
+
+.playground-audio audio {
+ width: 100%;
+ max-width: 400px;
+}
+
+@media (max-width: 768px) {
+ .playground-container {
+ grid-template-columns: 1fr;
+ }
}
\ No newline at end of file
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000..a71fa93
--- /dev/null
+++ b/utils/__init__.py
@@ -0,0 +1,5 @@
+"""
+Utils Package
+
+This package contains utility functions and helper classes.
+"""
\ No newline at end of file
diff --git a/utils/config.py b/utils/config.py
new file mode 100644
index 0000000..d8f06a9
--- /dev/null
+++ b/utils/config.py
@@ -0,0 +1,99 @@
+"""
+Configuration Utilities
+
+This module provides utilities for loading and managing configuration settings.
+"""
+
+import os
+import argparse
+import logging
+from dotenv import load_dotenv
+
+logger = logging.getLogger(__name__)
+
+def load_config():
+ """Load configuration from environment variables and command line arguments.
+
+ Returns:
+ argparse.Namespace: The configuration settings
+ """
+ # Load environment variables
+ load_dotenv()
+
+ # Get default values from environment variables
+ default_host = os.getenv("HOST", "localhost")
+ default_port = int(os.getenv("PORT", "7000"))
+ default_verify_ssl = os.getenv("VERIFY_SSL", "true").lower() != "false"
+ default_use_proxy = os.getenv("USE_PROXY", "true").lower() == "true"
+ default_proxy_api = os.getenv("PROXY_API_URL", "https://proxy.scdn.io/api/get_proxy.php")
+ default_proxy_protocol = os.getenv("PROXY_PROTOCOL", "http")
+ default_proxy_batch_size = int(os.getenv("PROXY_BATCH_SIZE", "5"))
+ default_max_queue_size = int(os.getenv("MAX_QUEUE_SIZE", "100"))
+
+ parser = argparse.ArgumentParser(description="Run the TTS API server")
+ parser.add_argument("--host", type=str, default=default_host, help="Host to bind to")
+ parser.add_argument("--port", type=int, default=default_port, help="Port to bind to")
+ parser.add_argument("--no-verify-ssl", action="store_true", help="Disable SSL certificate verification (insecure, use only for testing)")
+ parser.add_argument("--use-proxy", action="store_true", default=default_use_proxy, help="Use proxy pool for IP rotation")
+ parser.add_argument("--proxy-api", type=str, default=default_proxy_api, help="Proxy API URL")
+ parser.add_argument("--proxy-protocol", type=str, default=default_proxy_protocol, help="Proxy protocol (http, https, socks4, socks5, all)")
+ parser.add_argument("--proxy-batch-size", type=int, default=default_proxy_batch_size, help="Number of proxies to fetch at once")
+ parser.add_argument("--max-queue-size", type=int, default=default_max_queue_size, help="Maximum number of tasks in queue")
+ parser.add_argument("--test-connection", action="store_true", help="Test connection to OpenAI.fm and exit")
+
+ args = parser.parse_args()
+
+ # Apply global SSL settings if needed
+ if args.no_verify_ssl or not default_verify_ssl:
+ import ssl
+ # Disable SSL verification globally in Python
+ ssl._create_default_https_context = ssl._create_unverified_context
+ logger.warning("SSL certificate verification disabled GLOBALLY. This is insecure!")
+
+ return args
+
+async def test_connection(session):
+    """Diagnostic probe of OpenAI.fm; results are logged, nothing is returned.
+
+    Runs two checks in sequence: a GET against the homepage, then a minimal
+    form-encoded POST against the /api/generate endpoint.  Every outcome
+    (success or failure) is only logged; any exception is caught and logged
+    with a traceback, so this coroutine never raises to the caller.
+
+    Args:
+        session: aiohttp.ClientSession to use for requests
+    """
+    logger.info("Testing connection to OpenAI.fm...")
+
+    try:
+        # Check 1: plain GET to verify basic reachability of the site.
+        logger.info("Sending GET request to OpenAI.fm homepage")
+        async with session.get("https://www.openai.fm") as response:
+            logger.info(f"Homepage status: {response.status}")
+            if response.status == 200:
+                logger.info("Successfully connected to OpenAI.fm homepage")
+            else:
+                logger.error(f"Failed to connect to OpenAI.fm homepage: {response.status}")
+
+        # Check 2: smallest possible generate request (short input, one voice).
+        logger.info("Testing API endpoint with minimal request")
+        test_data = {"input": "Test", "voice": "alloy"}
+        import urllib.parse
+        # Encode the form body by hand so it matches the explicit
+        # application/x-www-form-urlencoded Content-Type header below.
+        url_encoded_data = urllib.parse.urlencode(test_data)
+
+        async with session.post(
+            "https://www.openai.fm/api/generate",
+            data=url_encoded_data,
+            # Browser-like headers, including Origin/Referer for the site.
+            headers={
+                "Accept": "*/*",
+                "Accept-Language": "en-US,en;q=0.9",
+                "Origin": "https://www.openai.fm",
+                "Referer": "https://www.openai.fm/",
+                "Content-Type": "application/x-www-form-urlencoded"
+            }
+        ) as response:
+            logger.info(f"API endpoint status: {response.status}")
+            if response.status == 200:
+                # Success: audio bytes; only the size is interesting here.
+                data = await response.read()
+                logger.info(f"Successfully received {len(data)} bytes from API")
+            else:
+                # Failure: log the response text to aid debugging.
+                text = await response.text()
+                logger.error(f"API request failed: {response.status}, {text}")
+
+    except Exception as e:
+        # Broad catch is deliberate: this is a best-effort diagnostic and
+        # must report, not propagate, connection problems.
+        logger.error(f"Connection test failed with error: {str(e)}")
+        import traceback
+        logger.error(traceback.format_exc())
\ No newline at end of file