diff --git a/.container/.env.example b/.container/.env.example new file mode 100644 index 00000000..dc3a4b44 --- /dev/null +++ b/.container/.env.example @@ -0,0 +1,18 @@ +# Necessary Keys +OPENAI_API_KEY=sk-123456789 + +# Optional Keys +# For Jina URL Reader +JINA_API_KEY=jina_123456789 + +# For Cohere Rerank +COHERE_API_KEY=abcdefghi + +# For Google Search API +# https://developers.google.com/custom-search/v1/overview +GOOGLE_API_KEY=123456789 +# https://cse.google.com/cse/all +SEARCH_ENGINE_ID=123456789 + +# For OpenWeatherMap API +OPENWEATHERMAP_API_KEY=123456789 diff --git a/.container/Dockerfile b/.container/Dockerfile new file mode 100644 index 00000000..2cfcbef7 --- /dev/null +++ b/.container/Dockerfile @@ -0,0 +1,22 @@ +FROM python:3.10-bookworm + +# Install uv and common tools +RUN apt-get update && apt-get install -y curl git && \ + pip install uv && \ + rm -rf /var/lib/apt/lists/* + +# Set workdir +WORKDIR /app/oasis + +# Copy code +COPY . . + +# Set up venv + install dev deps +RUN uv venv .venv --python=3.10 && \ + . .venv/bin/activate && \ + uv pip install -e ".[dev]" && \ + pip install pre-commit mypy && \ + pre-commit install + +# Keep container alive for development +CMD ["tail", "-f", "/dev/null"] diff --git a/.container/README.md b/.container/README.md new file mode 100644 index 00000000..fccb904e --- /dev/null +++ b/.container/README.md @@ -0,0 +1,96 @@ +# Install OASIS with Docker + +Docker provides a consistent and isolated environment to build, run, and develop oasis without worrying about system dependencies. This guide walks you through setting up oasis using Docker, running it, and developing with it. 
+ +## Prerequisites + +- Docker:https://docs.docker.com/engine/install/ +- Docker Compose:https://docs.docker.com/compose/install/ + +## Configure Environment + +Before starting the container, you need to navigate into the +[.container](../.container) folder and create a `.env` file **with your own +API keys**, so that these keys will be present in the environment variables of +the container, which will later be used by OASIS. + +```bash +cd .container + +# Create your own .env file by copying the example +cp .env.example .env + +# Edit .env to add your API keys or custom settings +``` + +## Start Container + +To build and start the development container: + +```bash +docker compose up -d +``` + +This will: + +- Build the image oasis:localdev +- Start the container oasis-localdev +- Set up all necessary dependencies + +After the build is complete, you can verify the list of images and all running containers. + +```bash +# List all Docker images +docker images +# Check running containers +docker ps +``` + +## Enter the Container + +Once running, you can access the container like this: + +```bash +docker compose exec oasis bash +``` + +You’ll now be inside the oasis dev environment. + +From here, you can activate your virtual environment (if used) and run tests: + +```bash +# Any other dev/test command +pytest +pre-commit run --all-files +``` + +## Save Your Progress + +Your local code is volume-mounted into the container. That means any changes you make inside the container are reflected in your local project folder — no need to worry about losing your work. + +## Exit, Stop and Delete the Container + +You can simply press `Ctrl + D` or use the `exit` command to exit the +container. + +After exiting the container, under normal cases the container will still be +running in the background. If you don't need the container anymore, you can +stop and delete the container with the following command. 
+ +```bash +docker compose down +``` + +## Online Images + +For users who only want to have a quick tryout on OASIS, we also provide the +pre-built images on +[our GitHub Container Registry](<>). + +## Pre-built Image (Optional) + +If you only want to try oasis without setting up the build: + +```bash + +``` diff --git a/.container/docker-compose.yaml b/.container/docker-compose.yaml new file mode 100644 index 00000000..d83f88ea --- /dev/null +++ b/.container/docker-compose.yaml @@ -0,0 +1,12 @@ +services: + oasis: + image: oasis:localdev + container_name: oasis-localdev + build: + context: ../ + dockerfile: .container/Dockerfile + volumes: + - ../:/app/oasis + env_file: + - .env + command: ["tail", "-f", "/dev/null"] diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 88077646..e3d5362a 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -13,4 +13,11 @@ Go over all the following points, and put an `x` in all the boxes that apply. - [ ] I have updated the documentation if needed: - [ ] I have added examples if this is a new feature +**Note:** If you are developing a new action for `SocialAgent`, please review the checklist below and mark all applicable items with an `x`. If you're not adding a new action, you can skip this section. + +- [ ] I have added the new action to `ActionType` in [`typing.py`](https://github.com/camel-ai/oasis/blob/main/oasis/social_platform/typing.py). +- [ ] I have added a corresponding test or a similar function, as shown in [`test_user_create_post.py`](https://github.com/camel-ai/oasis/blob/main/test/infra/database/test_user_create_post.py). +- [ ] I have included the new `ActionType` in both [`test_action_docstring.py`](https://github.com/camel-ai/oasis/blob/main/test/agent/test_action_docstring.py) and [`test_twitter_user_agent_all_actions.py`](https://github.com/camel-ai/oasis/blob/main/test/agent/test_twitter_user_agent_all_actions.py). 
+- [ ] I have documented the new action in [`actions.mdx`](https://github.com/camel-ai/oasis/blob/main/docs/key_modules/actions.mdx); the Mintlify GitHub app will deploy the changes automatically. + If you are unsure about any of these, don't hesitate to ask. We are here to help! diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml index ec77f654..9e00b74b 100644 --- a/.github/workflows/python-app.yml +++ b/.github/workflows/python-app.yml @@ -34,6 +34,7 @@ jobs: - name: Run tests env: + OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }} OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} run: | pytest diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2cc734f6..0a7d6af6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,3 +1,6 @@ +default_language_version: + python: python3 + repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fa917686..bee447ff 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -229,7 +229,7 @@ Sprint planning defines what can be delivered in the sprint and how it will be a Our aim is to make the developer setup as straightforward as possible. If you encounter any challenges during the setup process, don't hesitate to reach out to a maintainer. We're here to assist you and ensure that the experience is smooth not just for you but also for future contributors. 😊 -In line with this, we do have specific guidelines for code linting, formatting, and documentation in the codebase. If you find these requirements difficult or even just bothersome to work with, please feel free to get in touch with a maintainer — you can *@ziyi yang in Discord or @熠 in the WeChat group*. We don't want these guidelines to hinder the integration of good code into the codebase, so we're more than happy to provide support and find a solution that works for you. 
🤝 +In line with this, we do have specific guidelines for code linting, formatting, and documentation in the codebase. If you find these requirements difficult or even just bothersome to work with, please feel free to get in touch with a maintainer — you can *@doudou_wu in Discord or @张再斌 in the WeChat group*. We don't want these guidelines to hinder the integration of good code into the codebase, so we're more than happy to provide support and find a solution that works for you. 🤝 ## Quick Start 🚀 diff --git a/README.md b/README.md index 43d7160c..030154cc 100644 --- a/README.md +++ b/README.md @@ -249,8 +249,9 @@ To discover how to create profiles for large-scale users, as well as how to visu ### Latest Updates -📢 Add features for creating group chats, sending messages in group chats, and leaving group chats. - 📆 June 6, 2025 +📢 Add the report post action to mark inappropriate content. - 📆 June 8, 2025 +- Add features for creating group chats, sending messages in group chats, and leaving group chats. - 📆 June 6, 2025 - Support Interview Action for asking agents specific questions and getting answers. - 📆 June 2, 2025 - Support customization of each agent's models, tools, and prompts; refactor the interface to follow the PettingZoo style. - 📆 May 22, 2025 - Refactor into the OASIS environment, publish camel-oasis on PyPI, and release the documentation. - 📆 April 24, 2025 @@ -262,6 +263,13 @@ To discover how to create profiles for large-scale users, as well as how to visu - OASIS initially released on arXiv - 📆 November 19, 2024 - OASIS GitHub repository initially launched - 📆 November 19, 2024 +## 🔎 Follow-up Research + +- [MultiAgent4Collusion](https://github.com/renqibing/MultiAgent4Collusion): multi-agent collusion simulation framework in social systems +- More to come... 
+ +If your research is based on OASIS, we'd be happy to feature your work here—feel free to reach out or submit a pull request to add it to the [README](https://github.com/camel-ai/oasis/blob/main/README.md)! + ## 🥂 Contributing to OASIS🏝️ > We greatly appreciate your interest in contributing to our open-source initiative. To ensure a smooth collaboration and the success of contributions, we adhere to a set of contributing guidelines similar to those established by CAMEL. For a comprehensive understanding of the steps involved in contributing to our project, please refer to the OASIS [contributing guidelines](https://github.com/camel-ai/oasis/blob/master/CONTRIBUTING.md). 🤝🚀 diff --git a/assets/wechatgroup.png b/assets/wechatgroup.png index b193c89f..6f571428 100644 Binary files a/assets/wechatgroup.png and b/assets/wechatgroup.png differ diff --git a/docs/cookbooks/twitter_report_post.mdx b/docs/cookbooks/twitter_report_post.mdx new file mode 100644 index 00000000..25bc09bd --- /dev/null +++ b/docs/cookbooks/twitter_report_post.mdx @@ -0,0 +1,333 @@ +--- +title: 'Report Post' +description: 'Comprehensive guide to all available actions in the OASIS simulation environment' +--- + +# Report Post + +This cookbook provides a comprehensive guide to running a Twitter simulation using OASIS, including the post reporting feature. 
+ +## Overview + +The REPORT_POST action type enables you to: +- Report inappropriate or harmful content +- Track reporting history +- Analyze reporting patterns +- Maintain platform content quality + +## Key Features + +- **Manual Report Actions**: Use `ManualAction` with `ActionType.REPORT_POST` to report posts +- **Automatic Report Tracking**: The system automatically collects and stores report information +- **Database Storage**: All report data is stored in the report table for later analysis +- **Concurrent Execution**: Reports can be submitted alongside other social media actions +- **Warning Message Display**: Warning messages are shown when the number of reports exceeds the threshold + +## Important Note + +The `ActionType.REPORT_POST` should be included in the `available_actions` list when creating your agent graph, as it's a regular social media action that agents can perform. + +## Complete Example + +```python +import asyncio +import os +import json +import sqlite3 + +from camel.models import ModelFactory +from camel.types import ModelPlatformType, ModelType + +import oasis +from oasis import (ActionType, LLMAction, ManualAction, + generate_twitter_agent_graph) + + +async def main(): + # Create model instance + openai_model = ModelFactory.create( + model_platform=ModelPlatformType.OPENAI, + model_type=ModelType.GPT_4O_MINI, + ) + + # Define available actions for agents + available_actions = [ + ActionType.CREATE_POST, + ActionType.LIKE_POST, + ActionType.REPORT_POST, # Add post reporting functionality + ActionType.REPOST, + ActionType.FOLLOW, + ActionType.DO_NOTHING, + ] + + # Create agent graph + agent_graph = await generate_twitter_agent_graph( + profile_path=("data/twitter_dataset/anonymous_topic_200_1h/" + "False_Business_0.csv"), + model=openai_model, + available_actions=available_actions, + ) + + # Define database path + db_path = "./data/twitter_simulation.db" + + # Remove old database + if os.path.exists(db_path): + os.remove(db_path) + + # 
Create environment + env = oasis.make( + agent_graph=agent_graph, + platform=oasis.DefaultPlatformType.TWITTER, + database_path=db_path, + ) + + # Run environment + await env.reset() + + # Step 1: Agent 0 creates a post + actions_1 = {} + actions_1[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.CREATE_POST, + action_args={"content": "Earth is flat."}) + await env.step(actions_1) + + # Step 2: Let some agents respond with LLM actions + actions_2 = { + agent: LLMAction() + for _, agent in env.agent_graph.get_agents([1, 3, 5, 7, 9]) + } + await env.step(actions_2) + + # Step 3: Agent 1 creates another post, Agent 0 reports the first post + actions_3 = {} + actions_3[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.CREATE_POST, + action_args={"content": "Earth is not flat."}) + + # Create report action + actions_3[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.REPORT_POST, + action_args={ + "post_id": 1, + "report_reason": "This is misinformation!" + }) + + await env.step(actions_3) + + # Step 4: Let other agents respond + actions_4 = { + agent: LLMAction() + for _, agent in env.agent_graph.get_agents([2, 4, 6, 8, 10]) + } + await env.step(actions_4) + + # Step 5: Interview multiple agents + actions_5 = {} + actions_5[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={ + "prompt": "Has your post 'Earth is flat' been reported? What are your thoughts on this?" + }) + + actions_5[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={ + "prompt": "Has the post 'Earth is flat' been reported? Please share your thoughts." + }) + + actions_5[env.agent_graph.get_agent(2)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={ + "prompt": "What are your thoughts on the debate about Earth's shape?" 
+ }) + + await env.step(actions_5) + + # Step 6: Final LLM actions for remaining agents + actions_6 = { + agent: LLMAction() + for _, agent in env.agent_graph.get_agents([3, 5, 7, 9]) + } + await env.step(actions_6) + + # Close environment + await env.close() + + # Visualize interview results + print("\n=== Interview Results ===") + conn = sqlite3.connect(db_path) + cursor = conn.cursor() + + # Query all interview records + cursor.execute( + """ + SELECT user_id, info, created_at + FROM trace + WHERE action = ? + """, (ActionType.INTERVIEW.value, )) + + # Display interview results + for user_id, info_json, timestamp in cursor.fetchall(): + info = json.loads(info_json) + print(f"\nAgent {user_id} (Timestep {timestamp}):") + print(f"Prompt: {info.get('prompt', 'N/A')}") + print(f"Interview ID: {info.get('interview_id', 'N/A')}") + print(f"Response: {info.get('response', 'N/A')}") + + conn.close() + + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## How It Works + +### 1. Setup and Configuration + +Include `ActionType.REPORT_POST` in your available actions list: + +```python +available_actions = [ + ActionType.CREATE_POST, + ActionType.LIKE_POST, + ActionType.REPORT_POST, # Include reporting functionality + ActionType.REPOST, + ActionType.FOLLOW, + ActionType.DO_NOTHING, +] +``` + +### 2. Reporting Posts + +Use `ManualAction` with `ActionType.REPORT_POST` to report posts: + +```python +# Single report +report_action = ManualAction( + action_type=ActionType.REPORT_POST, + action_args={ + "post_id": 1, + "report_reason": "This is inappropriate content" + }) + +actions = {env.agent_graph.get_agent(0): report_action} +await env.step(actions) +``` + +### 3. 
Multiple Reports in One Step + +You can have multiple agents report the same post: + +```python +actions = {} +actions[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.REPORT_POST, + action_args={ + "post_id": 1, + "report_reason": "This is spam" + }) + +actions[env.agent_graph.get_agent(2)] = ManualAction( + action_type=ActionType.REPORT_POST, + action_args={ + "post_id": 1, + "report_reason": "This is misinformation" + }) + +await env.step(actions) +``` + +### 4. Mixing Reports with Other Actions + +Reports can be submitted alongside regular social media actions: + +```python +actions = {} +# Regular post creation +actions[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.CREATE_POST, + action_args={"content": "Earth is not flat."}) + +# Report action +actions[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.REPORT_POST, + action_args={ + "post_id": 1, + "report_reason": "This is misinformation!" + }) + +await env.step(actions) +``` + +## Data Storage and Retrieval + +### Database Schema + +Report data is stored in the `report` table with the following structure: +- `report_id`: Unique identifier for the report +- `user_id`: The ID of the reporting agent +- `post_id`: The ID of the reported post +- `report_reason`: The reason for the report +- `created_at`: Timestamp of the report + +### Retrieving Report Results + +```python +import sqlite3 +import json + +conn = sqlite3.connect(db_path) +cursor = conn.cursor() + +# Query all report records +cursor.execute(""" + SELECT report_id, user_id, post_id, report_reason, created_at + FROM report + ORDER BY created_at DESC +""") + +for report_id, user_id, post_id, reason, timestamp in cursor.fetchall(): + print(f"Report {report_id}:") + print(f" User: {user_id}") + print(f" Post: {post_id}") + print(f" Reason: {reason}") + print(f" Time: {timestamp}") + +conn.close() +``` + +## Best Practices + +### 1. 
Strategic Reporting + +Consider these factors when implementing reporting: +- Set appropriate reporting thresholds +- Monitor reporting frequency +- Analyze report reason distribution +- Process reported content promptly + +### 2. Integration with Other Features + +The reporting feature can be integrated with other features: +- Combine with interview functionality to understand user reactions to reports +- Integrate with content moderation systems +- Work with user behavior analysis systems + +## Common Use Cases + +1. **Content Moderation**: + - Monitor inappropriate content + - Track violations + - Maintain platform quality + +2. **User Behavior Analysis**: + - Analyze reporting patterns + - Identify problematic users + - Optimize content strategy + +3. **Platform Management**: + - Automate report processing + - Generate report summaries + - Develop management strategies diff --git a/docs/cookbooks/twitter_simulation.mdx b/docs/cookbooks/twitter_simulation.mdx index 663ce9c4..9e4e6184 100644 --- a/docs/cookbooks/twitter_simulation.mdx +++ b/docs/cookbooks/twitter_simulation.mdx @@ -11,7 +11,7 @@ This cookbook provides a comprehensive guide to running a Twitter simulation usi import asyncio import os -from camel.models import ModelFactory +from camel.models import ModelFactory, ModelManager from camel.types import ModelPlatformType import oasis @@ -24,35 +24,36 @@ async def main(): vllm_model_1 = ModelFactory.create( model_platform=ModelPlatformType.VLLM, model_type="qwen-2", + # TODO: change to your own vllm server url url="http://10.109.28.7:8080/v1", ) vllm_model_2 = ModelFactory.create( model_platform=ModelPlatformType.VLLM, model_type="qwen-2", + # TODO: change to your own vllm server url url="http://10.109.27.103:8080/v1", ) + # Define the models for agents. 
Agents will select models based on - # pre-defined scheduling strategies - models = [vllm_model_1, vllm_model_2] + # round-robin strategy + shared_model_manager = ModelManager( + models=[vllm_model_1, vllm_model_2], + scheduling_strategy='round_robin', + ) # Define the available actions for the agents - available_actions = [ - ActionType.CREATE_POST, - ActionType.LIKE_POST, - ActionType.REPOST, - ActionType.FOLLOW, - ActionType.DO_NOTHING, - ActionType.QUOTE_POST, - ] + available_actions = ActionType.get_default_twitter_actions() agent_graph = await generate_twitter_agent_graph( - profile_path="./data/reddit/user_data_36.json", - model=models, + profile_path=("data/twitter_dataset/anonymous_topic_200_1h/" + "False_Business_0.csv"), + model=shared_model_manager, available_actions=available_actions, ) # Define the path to the database db_path = "./data/twitter_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) # Delete the old database if os.path.exists(db_path): diff --git a/docs/key_modules/actions.mdx b/docs/key_modules/actions.mdx index 7dceb3fd..990ccb72 100644 --- a/docs/key_modules/actions.mdx +++ b/docs/key_modules/actions.mdx @@ -90,6 +90,7 @@ OASIS provides a comprehensive set of actions that simulate real social media be | `UNLIKE_POST` | Remove a like from a previously liked post | | `DISLIKE_POST` | Dislike or downvote a post | | `UNDO_DISLIKE_POST` | Remove a dislike from a previously disliked post | +| `REPORT_POST` | Report a post for inappropriate content | | `REPOST` | Repost content without modification (equivalent to retweet) | | `QUOTE_POST` | Repost with additional commentary | | `CREATE_COMMENT` | Create a comment on a post | @@ -195,6 +196,17 @@ action = ManualAction( ) ``` +#### REPORT_POST +```python +action = ManualAction( + action=ActionType.REPORT_POST, + args={ + "post_id": 123, + "report_reason": "This post contains false information" + } +) +``` + #### REPOST ```python action = ManualAction( diff --git 
a/examples/custom_platform_simulation.py b/examples/custom_platform_simulation.py index 3d3b211b..4f93a4d2 100644 --- a/examples/custom_platform_simulation.py +++ b/examples/custom_platform_simulation.py @@ -49,6 +49,7 @@ async def main(): # Define the path to the database db_path = "./data/twitter_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) # Delete the old database if os.path.exists(db_path): diff --git a/examples/custom_prompt_simulation.py b/examples/custom_prompt_simulation.py index e0cb4783..ffc78a2d 100644 --- a/examples/custom_prompt_simulation.py +++ b/examples/custom_prompt_simulation.py @@ -77,6 +77,7 @@ async def main(): # Define the path to the database db_path = "./data/reddit_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) # Delete the old database if os.path.exists(db_path): diff --git a/examples/different_model_simulation.py b/examples/different_model_simulation.py index 0155e9d0..ab0d1ac7 100644 --- a/examples/different_model_simulation.py +++ b/examples/different_model_simulation.py @@ -78,6 +78,7 @@ async def main(): # Define the path to the database db_path = "./data/reddit_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) # Delete the old database if os.path.exists(db_path): diff --git a/examples/group_chat_simulation.py b/examples/group_chat_simulation.py index 7468b5e5..241ee3d9 100644 --- a/examples/group_chat_simulation.py +++ b/examples/group_chat_simulation.py @@ -51,6 +51,7 @@ async def main(): # Define the path to the database db_path = "./data/twitter_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) # Delete the old database if os.path.exists(db_path): diff --git a/examples/group_chat_simulation_oai.py b/examples/group_chat_simulation_oai.py new file mode 100644 index 00000000..393f073b --- /dev/null +++ b/examples/group_chat_simulation_oai.py @@ -0,0 +1,107 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS,
= group_result["group_id"] + + actions_0 = {} + + actions_0[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.CREATE_POST, + action_args={"content": "Hello World."}) + await env.step(actions_0) + + actions_1 = {} + + actions_1[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.JOIN_GROUP, action_args={"group_id": group_id}) + await env.step(actions_1) + + actions_3 = {} + + actions_3[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.SEND_TO_GROUP, + action_args={ + "group_id": group_id, + "message": "Hello world! This the best group ever!" + }, + ) + await env.step(actions_3) + + actions_4 = { + agent: LLMAction() + for _, agent in env.agent_graph.get_agents() + } + await env.step(actions_4) + + # Close the environment + await env.close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/quick_start.py b/examples/quick_start.py new file mode 100644 index 00000000..1015dbde --- /dev/null +++ b/examples/quick_start.py @@ -0,0 +1,117 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the “License”); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an “AS IS” BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== +import asyncio +import os + +from camel.models import ModelFactory +from camel.types import ModelPlatformType, ModelType + +import oasis +from oasis import (ActionType, AgentGraph, LLMAction, ManualAction, + SocialAgent, UserInfo) + + +async def main(): + # Define the model for the agents + openai_model = ModelFactory.create( + model_platform=ModelPlatformType.OPENAI, + model_type=ModelType.GPT_4O_MINI, + ) + + # Define the available actions for the agents + available_actions = [ + ActionType.LIKE_POST, + ActionType.CREATE_POST, + ActionType.CREATE_COMMENT, + ActionType.FOLLOW, + ] + + # initialize the agent graph + agent_graph = AgentGraph() + + # initialize the agent alice and add it to the agent graph + agent_alice = SocialAgent( + agent_id=0, + user_info=UserInfo( + user_name="alice", + name="Alice", + description="A tech enthusiast and a fan of OASIS", + profile=None, + recsys_type="reddit", + ), + agent_graph=agent_graph, + model=openai_model, + available_actions=available_actions, + ) + agent_graph.add_agent(agent_alice) + + # initialize the agent bob and add it to the agent graph + agent_bob = SocialAgent( + agent_id=1, + user_info=UserInfo( + user_name="bob", + name="Bob", + description=("A researcher of using OASIS to research " + "the social behavior of users"), + profile=None, + recsys_type="reddit", + ), + agent_graph=agent_graph, + model=openai_model, + available_actions=available_actions, + ) + agent_graph.add_agent(agent_bob) + + # Define the path to the database + db_path = "./reddit_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) + + # Delete the old database + if os.path.exists(db_path): + os.remove(db_path) + + # Make the environment + env = oasis.make( + agent_graph=agent_graph, + platform=oasis.DefaultPlatformType.REDDIT, + database_path=db_path, + ) + + # Run the environment + await env.reset() + + # Define a manual action for the agent alice to create a post + action_hello = { + 
env.agent_graph.get_agent(0): [ + ManualAction(action_type=ActionType.CREATE_POST, + action_args={"content": "Hello, OASIS World!"}) + ] + } + # Run the manual action + await env.step(action_hello) + + # Define the LLM actions for all agents + all_agents_llm_actions = { + agent: LLMAction() + for _, agent in env.agent_graph.get_agents() + } + # Run the LLM actions + await env.step(all_agents_llm_actions) + + # Close the environment + await env.close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/reddit_simulation_openai.py b/examples/reddit_simulation_openai.py index 6ef5657b..d3191f88 100644 --- a/examples/reddit_simulation_openai.py +++ b/examples/reddit_simulation_openai.py @@ -40,6 +40,7 @@ async def main(): # Define the path to the database db_path = "./data/reddit_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) # Delete the old database if os.path.exists(db_path): diff --git a/examples/search_tools_simulation.py b/examples/search_tools_simulation.py index 87a5fedc..de480c0f 100644 --- a/examples/search_tools_simulation.py +++ b/examples/search_tools_simulation.py @@ -71,6 +71,7 @@ async def main(): # Define the path to the database db_path = "./data/reddit_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) # Delete the old database if os.path.exists(db_path): diff --git a/examples/sympy_tools_simulation.py b/examples/sympy_tools_simulation.py index 5b00881d..8ce4083b 100644 --- a/examples/sympy_tools_simulation.py +++ b/examples/sympy_tools_simulation.py @@ -71,6 +71,7 @@ async def main(): # Define the path to the database db_path = "./data/reddit_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) # Delete the old database if os.path.exists(db_path): diff --git a/examples/twitter_interview.py b/examples/twitter_interview.py index d2a3c8c2..412ad1bf 100644 --- a/examples/twitter_interview.py +++ b/examples/twitter_interview.py @@ -52,6 +52,7 @@ async def main(): # 
Define the path to the database db_path = "./data/twitter_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) # Delete the old database if os.path.exists(db_path): @@ -100,6 +101,7 @@ async def main(): await env.step(actions_3) # Fourth timestep: Let some other agents respond + # Activate 5 agents with id 2, 4, 6, 8, 10 actions_4 = { agent: LLMAction() for _, agent in env.agent_graph.get_agents([2, 4, 6, 8, 10]) diff --git a/examples/twitter_misinforeport.py b/examples/twitter_misinforeport.py new file mode 100644 index 00000000..17ae350b --- /dev/null +++ b/examples/twitter_misinforeport.py @@ -0,0 +1,202 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the “License”); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an “AS IS” BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== +import asyncio +import json +import os +import sqlite3 + +from camel.models import ModelFactory +from camel.types import ModelPlatformType, ModelType + +import oasis +from oasis import (ActionType, LLMAction, ManualAction, + generate_twitter_agent_graph) + + +async def main(): + openai_model = ModelFactory.create( + model_platform=ModelPlatformType.OPENAI, + model_type=ModelType.GPT_4O_MINI, + ) + + # Define the available actions for the agents + # Note: INTERVIEW is NOT included here to + # prevent LLM from automatically selecting it + # INTERVIEW can still be used manually via ManualAction + available_actions = [ + ActionType.CREATE_POST, + ActionType.LIKE_POST, + ActionType.REPORT_POST, + ActionType.REPOST, + ActionType.QUOTE_POST, + ActionType.FOLLOW, + ActionType.DO_NOTHING, + ] + + agent_graph = await generate_twitter_agent_graph( + profile_path=("data/twitter_dataset/anonymous_topic_200_1h/" + "False_Business_0.csv"), + model=openai_model, + available_actions=available_actions, + ) + + # Define the path to the database + db_path = "./data/twitter_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) + + # Delete the old database + if os.path.exists(db_path): + os.remove(db_path) + + # Make the environment + env = oasis.make( + agent_graph=agent_graph, + platform=oasis.DefaultPlatformType.TWITTER, + database_path=db_path, + ) + + await env.reset() + + # First timestep: Agent 0 creates a post + actions_1 = {} + actions_1[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.CREATE_POST, + action_args={"content": "Earth is flat and NASA is hiding the truth."}) + await env.step(actions_1) + + # Second timestep: Agent 1 creates a post with correct information + actions_2 = {} + actions_2[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.CREATE_POST, + action_args={ + "content": + ("Earth is not flat. 
Here's scientific evidence: [evidence]") + }) + await env.step(actions_2) + + # Third timestep: Agent 1 creates a post, and we interview Agent 0 + actions_3 = {} + actions_3[env.agent_graph.get_agent(2)] = ManualAction( + action_type=ActionType.REPORT_POST, + action_args={ + "post_id": + 1, + "report_reason": + ("This post spreads dangerous misinformation about science.") + }) + await env.step(actions_3) + + # Fourth timestep: Agent 3 reposts Agent 0's post + actions_4 = {} + actions_4[env.agent_graph.get_agent(3)] = ManualAction( + action_type=ActionType.REPOST, action_args={"post_id": 1}) + await env.step(actions_4) + + # Fifth timestep: Agent 4 reports the reposted content + actions_5 = {} + actions_5[env.agent_graph.get_agent(4)] = ManualAction( + action_type=ActionType.REPORT_POST, + action_args={ + "post_id": 4, + "report_reason": ("This repost spreads the same misinformation.") + }) + await env.step(actions_5) + + # Sixth timestep: Agent 5 quotes Agent 0's post with correction + actions_6 = {} + actions_6[env.agent_graph.get_agent(5)] = ManualAction( + action_type=ActionType.QUOTE_POST, + action_args={ + "post_id": + 1, + "quote_content": + ("This is incorrect. Earth is an oblate spheroid, " + "as proven by centuries of scientific research.") + }) + await env.step(actions_6) + + # Seventh timestep: More agents report the original post + actions_7 = {} + actions_7[env.agent_graph.get_agent(6)] = ManualAction( + action_type=ActionType.REPORT_POST, + action_args={ + "post_id": 1, + "report_reason": "Misinformation about Earth's shape." + }) + await env.step(actions_7) + + # Eighth timestep: Interview agents about their actions + actions_8 = {} + actions_8[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={ + "prompt": + ("Your post about Earth being flat has been " + "reported multiple times. 
What are your thoughts on this?") + }) + + actions_8[env.agent_graph.get_agent(2)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={ + "prompt": "Why did you report the post about Earth being flat?" + }) + + actions_8[env.agent_graph.get_agent(3)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={ + "prompt": ("You reposted the flat Earth post. Did you notice " + "the warning message about reports?") + }) + + actions_8[env.agent_graph.get_agent(5)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={ + "prompt": ("You quoted the flat Earth post with a correction. " + "What was your intention?") + }) + await env.step(actions_8) + + # Ninth timestep: Let remaining agents respond with LLM actions + actions_9 = { + agent: LLMAction() + for _, agent in env.agent_graph.get_agents([7, 8, 9, 10]) + } + await env.step(actions_9) + + # Close the environment + await env.close() + + # visualize the interview results + print("\n=== Interview Results ===") + conn = sqlite3.connect(db_path) + cursor = conn.cursor() + cursor.execute( + """ + SELECT user_id, info, created_at + FROM trace + WHERE action = ? 
+ """, (ActionType.INTERVIEW.value, )) + + for user_id, info_json, timestamp in cursor.fetchall(): + info = json.loads(info_json) + print(f"\nAgent {user_id} (Timestep {timestamp}):") + print(f"Prompt: {info.get('prompt', 'N/A')}") + print(f"Interview ID: {info.get('interview_id', 'N/A')}") + print(f"Response: {info.get('response', 'N/A')}") + + conn.close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/twitter_simulation_openai.py b/examples/twitter_simulation_openai.py index a222f7d8..eedb11a8 100644 --- a/examples/twitter_simulation_openai.py +++ b/examples/twitter_simulation_openai.py @@ -40,6 +40,7 @@ async def main(): # Define the path to the database db_path = "./data/twitter_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) # Delete the old database if os.path.exists(db_path): diff --git a/examples/twitter_simulation_vllm.py b/examples/twitter_simulation_vllm.py index ca83e92b..8f1d25dd 100644 --- a/examples/twitter_simulation_vllm.py +++ b/examples/twitter_simulation_vllm.py @@ -56,6 +56,7 @@ async def main(): # Define the path to the database db_path = "./data/twitter_simulation.db" + os.environ["OASIS_DB_PATH"] = os.path.abspath(db_path) # Delete the old database if os.path.exists(db_path): diff --git a/oasis/__init__.py b/oasis/__init__.py index e80e54e3..c04b43a3 100644 --- a/oasis/__init__.py +++ b/oasis/__init__.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== -__version__ = "0.2.2" +__version__ = "0.2.3" from oasis.environment.env_action import LLMAction, ManualAction from oasis.environment.make import make diff --git a/oasis/clock/clock.py b/oasis/clock/clock.py index b0f8e197..22f7985d 100644 --- a/oasis/clock/clock.py +++ b/oasis/clock/clock.py @@ -18,16 +18,31 @@ class Clock: r"""Clock used for the sandbox.""" def __init__(self, k: int = 1): + r""" + Args: + k (int): Time acceleration factor. + """ self.real_start_time = datetime.now() self.k = k self.time_step = 0 def time_transfer(self, now_time: datetime, start_time: datetime) -> datetime: + r"""Convert real time to adjusted simulation time. + + Args: + now_time (datetime): Current real time. + start_time (datetime): Simulation's starting reference time. + + Returns: + datetime: Adjusted time according to the acceleration factor. + """ time_diff = now_time - self.real_start_time adjusted_diff = self.k * time_diff adjusted_time = start_time + adjusted_diff return adjusted_time def get_time_step(self) -> str: + r"""Get the time step count. + """ return str(self.time_step) diff --git a/oasis/environment/env.py b/oasis/environment/env.py index 4d815c60..19072533 100644 --- a/oasis/environment/env.py +++ b/oasis/environment/env.py @@ -46,6 +46,9 @@ class OasisEnv: + r""" + The environment for social platform simulation. + """ def __init__( self, diff --git a/oasis/environment/make.py b/oasis/environment/make.py index 5d790b87..50b65ff1 100644 --- a/oasis/environment/make.py +++ b/oasis/environment/make.py @@ -15,5 +15,7 @@ def make(*args, **kwargs): + r"""Create a new Oasis simulation environment. 
+ """ obj = OasisEnv(*args, **kwargs) return obj diff --git a/oasis/social_agent/agent.py b/oasis/social_agent/agent.py index a93847c1..0871720b 100644 --- a/oasis/social_agent/agent.py +++ b/oasis/social_agent/agent.py @@ -66,8 +66,30 @@ def __init__(self, agent_graph: "AgentGraph" = None, available_actions: list[ActionType] = None, tools: Optional[List[Union[FunctionTool, Callable]]] = None, - single_iteration: bool = True, + max_iteration: int = 1, interview_record: bool = False): + r"""Initialize a social agent with specific information. + + Args: + agent_id (int): The ID of the agent. + user_info (UserInfo): Complete social profile configuration. + user_info_template (TextPrompt | None): Template for displaying + user information. If None, uses the default system message + template. + channel (Channel | None): Social platform channel to connect agents + to. If None, create a new channel. + model (Optional[Union[BaseModelBackend, List[BaseModelBackend], + ModelManager]]): Configuration for the backend LLM(s). + agent_graph (AgentGraph): Social connection graph representing the + agent's network and relationships. + available_actions (list[ActionType]): Subset of allowed action + types. If None, all action types are not permitted. + tools (Optional[List[Union[FunctionTool, Callable]]]): Tools that + supported by openai. + single_iteration (bool): Whether to run in single-step mode. + interview_record (bool): Whether to enable saving interview + interactions. 
+ """ self.social_agent_id = agent_id self.user_info = user_info self.channel = channel or Channel() @@ -103,11 +125,13 @@ def __init__(self, ] ] all_tools = (tools or []) + (self.action_tools or []) - super().__init__(system_message=system_message, - model=model, - scheduling_strategy='random_model', - tools=all_tools, - single_iteration=single_iteration) + super().__init__( + system_message=system_message, + model=model, + scheduling_strategy='random_model', + tools=all_tools, + ) + self.max_iteration = max_iteration self.interview_record = interview_record self.agent_graph = agent_graph self.test_prompt = ( @@ -121,6 +145,10 @@ def __init__(self, "What do you think Helen should do?") async def perform_action_by_llm(self): + r"""Perform social media actions. The agent observes + its social environment and takes actions based on the + current state of the environment. + """ # Get posts: env_prompt = await self.env.to_text_prompt() user_msg = BaseMessage.make_user_message( @@ -153,6 +181,9 @@ async def perform_action_by_llm(self): return e async def perform_test(self): + r""" + Present a standardized test scenario to evaluate + the agent's decision-making in social contexts.""" """ doing group polarization test for all agents. TODO: rewrite the function according to the ChatAgent. @@ -247,6 +278,9 @@ async def perform_interview(self, interview_prompt: str): } async def perform_action_by_hci(self) -> Any: + r"""Present a list of available social actions for manual selection and + guides the user through providing required arguments. + """ print("Please choose one function to perform:") function_list = self.env.action.get_openai_function_list() for i in range(len(function_list)): @@ -274,6 +308,12 @@ async def perform_action_by_hci(self) -> Any: return result async def perform_action_by_data(self, func_name, *args, **kwargs) -> Any: + r""" + Execute a specific social action with provided arguments. 
+ + Args: + func_name (Union[ActionType, str]): The action to perform. + """ func_name = func_name.value if isinstance(func_name, ActionType) else func_name function_list = self.env.action.get_openai_function_list() @@ -299,6 +339,15 @@ def perform_agent_graph_action( r"""Remove edge if action is unfollow or add edge if action is follow to the agent graph. """ + r"""Update the social graph based on relationship actions. + + Args: + action_name (str): The type of relationship action + (follow/unfollow). + arguments (dict): Action arguments containing + 'followee_id' for identifying. + """ + # ... [method implementation remains unchanged] ... if "unfollow" in action_name: followee_id: int | None = arguments.get("followee_id", None) if followee_id is None: @@ -315,5 +364,10 @@ def perform_agent_graph_action( f"Agent {self.social_agent_id} followed Agent {followee_id}") def __str__(self) -> str: + r"""Return a string representation of the SocialAgent. + + Returns: + str: String in format "SocialAgent(agent_id=X, model_type=Y)" + """ return (f"{self.__class__.__name__}(agent_id={self.social_agent_id}, " f"model_type={self.model_type.value})") diff --git a/oasis/social_agent/agent_action.py b/oasis/social_agent/agent_action.py index 87bc867a..482d659d 100644 --- a/oasis/social_agent/agent_action.py +++ b/oasis/social_agent/agent_action.py @@ -20,12 +20,36 @@ class SocialAction: + r"""Class provides a complete set of social media + interaction capabilities for agents, including post + creation, like post and others. All actions are performed + asynchronously through a channel communication mechanism. + """ def __init__(self, agent_id: int, channel: Channel): + r""" + Initialize the social action. + + Args: + agent_id (int): The ID of the agent that will perform + the action. + channel (Channel): Communication channel instance for + platform interaction. 
+ """ self.agent_id = agent_id self.channel = channel def get_openai_function_list(self) -> list[FunctionTool]: + r""" + Convert social action method into a FunctionTool which is compatible to + OpenAI's function calling API. This enables LLM agents to dynamically + select and invoke social actions. + + Returns: + list[FunctionTool]: Complete set of wrapped social actions + as function tools, ready for integration with LLM function + calling systems. + """ return [ FunctionTool(func) for func in [ self.create_post, @@ -51,14 +75,24 @@ def get_openai_function_list(self) -> list[FunctionTool]: self.unmute, self.purchase_product, self.interview, + self.report_post, self.join_group, self.leave_group, self.send_to_group, self.create_group, + self.listen_from_group, ] ] async def perform_action(self, message: Any, type: str): + r""" + Execute specific social actions based on the message + and type through the channel. + + Args: + message (Any): Specific action instructions. + type (str): Action type identifier from ActionType enum. + """ message_id = await self.channel.write_to_receive_queue( (self.agent_id, message, type)) response = await self.channel.read_from_send_queue(message_id) @@ -670,6 +704,34 @@ async def interview(self, prompt: str): """ return await self.perform_action(prompt, ActionType.INTERVIEW.value) + async def report_post(self, post_id: int, report_reason: str): + r"""Report a specified post with a given reason. + + This method invokes an asynchronous action to report a specified post + with a given reason. Upon successful execution, it returns a + dictionary indicating success and the ID of the newly created report. + + Args: + post_id (int): The ID of the post to be reported. + report_reason (str): The reason for reporting the post. + + Returns: + dict: A dictionary with two key-value pairs. The 'success' key + maps to a boolean indicating whether the report creation was + successful. 
The 'report_id' key maps to the integer ID of the + newly created report. + + Example of a successful return: + {"success": True, "report_id": 123} + + Note: + Attempting to report a post that the user has already reported will + result in a failure. + """ + report_message = (post_id, report_reason) + return await self.perform_action(report_message, + ActionType.REPORT_POST.value) + async def create_group(self, group_name: str): r"""Creates a new group on the platform. diff --git a/oasis/social_agent/agent_environment.py b/oasis/social_agent/agent_environment.py index 516bc208..bb2fb99f 100644 --- a/oasis/social_agent/agent_environment.py +++ b/oasis/social_agent/agent_environment.py @@ -14,13 +14,19 @@ from __future__ import annotations import json +import sqlite3 from abc import ABC, abstractmethod from string import Template from oasis.social_agent.agent_action import SocialAction +from oasis.social_platform.database import get_db_path class Environment(ABC): + r""" + An abstract base class representing an environment + that can be converted to text. + """ @abstractmethod def to_text_prompt(self) -> str: @@ -29,6 +35,11 @@ def to_text_prompt(self) -> str: class SocialEnvironment(Environment): + r""" + Class for translating raw platform data into prompts. The templated + prompts can be customized with specific parameters such as number of + followers and number of all groups. + """ followers_env_template = Template("I have $num_followers followers.") follows_env_template = Template("I have $num_follows follows.") @@ -51,9 +62,23 @@ class SocialEnvironment(Environment): "posts content. Do not limit your action in just `like` to like posts") def __init__(self, action: SocialAction): + r"""Initialize the social environment. + + Args: + action (SocialAction): Pre-configured social action instance that + handles actual platform interactions. + """ self.action = action async def get_posts_env(self) -> str: + r"""Fetch the latest posts and formats them. 
Then generate the post + description. + + Returns: + str: Formatted post feed description. If success, fill in the + latest information into template prompt. If refresh fails, show the + fail message. + """ posts = await self.action.refresh() # TODO: Replace posts json format string to other formats if posts["success"]: @@ -64,14 +89,60 @@ async def get_posts_env(self) -> str: return posts_env async def get_followers_env(self) -> str: + r"""Fetch the number of followers and generate followers description. + + Returns: + str: Fill in the latest information into template prompt. + Example: "I have 40 followers." + """ # TODO: Implement followers env - return self.followers_env_template.substitute(num_followers=0) + agent_id = self.action.agent_id + db_path = get_db_path() + try: + conn = sqlite3.connect(db_path) + cursor = conn.cursor() + cursor.execute("SELECT num_followers FROM user WHERE agent_id = ?", + (agent_id, )) + result = cursor.fetchone() + num_followers = result[0] if result else 0 + conn.close() + except Exception: + num_followers = 0 + return self.followers_env_template.substitute( + {"num_followers": num_followers}) async def get_follows_env(self) -> str: + r"""Fetch the number of follows and generate follows description. + + Returns: + str: Fill in the latest information into template prompt. + Example: "I have 50 follows." + """ # TODO: Implement follows env - return self.follows_env_template.substitute(num_follows=0) + agent_id = self.action.agent_id + try: + db_path = get_db_path() + conn = sqlite3.connect(db_path) + cursor = conn.cursor() + cursor.execute( + "SELECT num_followings FROM user WHERE agent_id = ?", + (agent_id, )) + result = cursor.fetchone() + num_followings = result[0] if result else 0 + conn.close() + except Exception: + num_followings = 0 + return self.follows_env_template.substitute( + {"num_follows": num_followings}) async def get_group_env(self) -> str: + r"""Fetch group information (e.g. 
all_groups) and generate group + interaction environment description. + + Returns: + str: If group exist, Fill in the latest information + into template prompt. If it doesn't, return "No groups" + """ groups = await self.action.listen_from_group() if groups["success"]: all_groups = json.dumps(groups["all_groups"]) @@ -89,9 +160,16 @@ async def get_group_env(self) -> str: async def to_text_prompt( self, include_posts: bool = True, - include_followers: bool = False, - include_follows: bool = False, + include_followers: bool = True, + include_follows: bool = True, ) -> str: + r"""Generate social environment prompt from selected components. + + Args: + include_posts (bool): Whether to include post feed. + include_followers (bool): Whether to include follower count. + include_follows (bool): Whether to include follows count. + """ followers_env = (await self.get_followers_env() if include_follows else "No followers.") follows_env = (await self.get_follows_env() diff --git a/oasis/social_agent/agent_graph.py b/oasis/social_agent/agent_graph.py index c7819314..8799195d 100644 --- a/oasis/social_agent/agent_graph.py +++ b/oasis/social_agent/agent_graph.py @@ -23,8 +23,18 @@ class Neo4jHandler: + r""" + This class provides methods to create, delete, and manage + agents (nodes) and their relationships (edges) in a Neo4j + graph database. + """ def __init__(self, nei4j_config: Neo4jConfig): + r"""Initialize the Neo4jHandler with the given configuration. + + Args: + nei4j_config (Neo4jConfig): Configuration for Neo4j connection. + """ self.driver = GraphDatabase.driver( nei4j_config.uri, auth=(nei4j_config.username, nei4j_config.password), @@ -32,13 +42,27 @@ def __init__(self, nei4j_config: Neo4jConfig): self.driver.verify_connectivity() def close(self): + r"""Close the Neo4j driver connection. + """ self.driver.close() def create_agent(self, agent_id: int): + r""" + Create a new agent node in the Neo4j graph. + + Args: + agent_id (int): The ID of the agent. 
+ """ with self.driver.session() as session: session.write_transaction(self._create_and_return_agent, agent_id) def delete_agent(self, agent_id: int): + r""" + Delete an agent node and all its relationships from the graph. + + Args: + agent_id (int): ID of the agent to be deleted. + """ with self.driver.session() as session: session.write_transaction( self._delete_agent_and_relationships, @@ -46,14 +70,33 @@ def delete_agent(self, agent_id: int): ) def get_number_of_nodes(self) -> int: + r""" + Get the total number of agent nodes in the graph. + + Returns: + int: Count of nodes in the graph. + """ with self.driver.session() as session: return session.read_transaction(self._get_number_of_nodes) def get_number_of_edges(self) -> int: + r""" + Get the total number of relationships (edges) in the graph. + + Returns: + int: Count of edges in the graph. + """ with self.driver.session() as session: return session.read_transaction(self._get_number_of_edges) def add_edge(self, src_agent_id: int, dst_agent_id: int): + r""" + Add a directed FOLLOW relationship between two agents. + + Args: + src_agent_id (int): Source agent ID. + dst_agent_id (int): Destination agent ID. + """ with self.driver.session() as session: session.write_transaction( self._add_and_return_edge, @@ -62,6 +105,13 @@ def add_edge(self, src_agent_id: int, dst_agent_id: int): ) def remove_edge(self, src_agent_id: int, dst_agent_id: int): + r""" + Remove a FOLLOW relationship between two agents. + + Args: + src_agent_id (int): Source agent ID. + dst_agent_id (int): Destination agent ID. + """ with self.driver.session() as session: session.write_transaction( self._remove_and_return_edge, @@ -70,14 +120,29 @@ def remove_edge(self, src_agent_id: int, dst_agent_id: int): ) def get_all_nodes(self) -> list[int]: + r""" + Get a list of all agent IDs in the graph. + + Returns: + list[int]: List of agent IDs. 
+ """ with self.driver.session() as session: return session.read_transaction(self._get_all_nodes) def get_all_edges(self) -> list[tuple[int, int]]: + r""" + Get all FOLLOW relationships in the graph. + + Returns: + list[tuple[int, int]]: List of (source, destination) + agent ID pairs. + """ with self.driver.session() as session: return session.read_transaction(self._get_all_edges) def reset_graph(self): + r"""Clear the entire graph by deleting all nodes and relationships. + """ with self.driver.session() as session: session.write_transaction(self._reset_graph) @@ -180,6 +245,15 @@ def __init__( backend: Literal["igraph", "neo4j"] = "igraph", neo4j_config: Neo4jConfig | None = None, ): + r""" + Initialize the agent graph. + + Args: + backend (Literal["igraph", "neo4j"]): The graph backend to use. + Default to "igraph". + neo4j_config (Neo4jConfig | None): Configuration for Neo4j backend. + Required if backend is "neo4j". + """ self.backend = backend if self.backend == "igraph": self.graph = ig.Graph(directed=True) @@ -190,6 +264,8 @@ def __init__( self.agent_mappings: dict[int, SocialAgent] = {} def reset(self): + r"""Reset the graph by removing all nodes and edges. + """ if self.backend == "igraph": self.graph = ig.Graph(directed=True) else: @@ -197,6 +273,12 @@ def reset(self): self.agent_mappings: dict[int, SocialAgent] = {} def add_agent(self, agent: SocialAgent): + r""" + Add a new social agent to the graph. + + Args: + agent (SocialAgent): The agent to be added to the graph. + """ if self.backend == "igraph": self.graph.add_vertex(agent.social_agent_id) else: @@ -204,12 +286,25 @@ def add_agent(self, agent: SocialAgent): self.agent_mappings[agent.social_agent_id] = agent def add_edge(self, agent_id_0: int, agent_id_1: int): + r""" + Add a directed edge between two agents. + + Args: + agent_id_0 (int): Source agent ID. + agent_id_1 (int): Destination agent ID. 
+ """ try: self.graph.add_edge(agent_id_0, agent_id_1) except Exception: pass def remove_agent(self, agent: SocialAgent): + r""" + Remove a social agent from the graph. + + Args: + agent (SocialAgent): The agent to be deleted from the graph. + """ if self.backend == "igraph": self.graph.delete_vertices(agent.social_agent_id) else: @@ -217,6 +312,13 @@ def remove_agent(self, agent: SocialAgent): del self.agent_mappings[agent.social_agent_id] def remove_edge(self, agent_id_0: int, agent_id_1: int): + r""" + Remove a directed edge between two agents. + + Args: + agent_id_0 (int): Source agent ID. + agent_id_1 (int): Destination agent ID. + """ if self.backend == "igraph": if self.graph.are_connected(agent_id_0, agent_id_1): self.graph.delete_edges([(agent_id_0, agent_id_1)]) @@ -224,11 +326,30 @@ def remove_edge(self, agent_id_0: int, agent_id_1: int): self.graph.remove_edge(agent_id_0, agent_id_1) def get_agent(self, agent_id: int) -> SocialAgent: + r""" + Get a social agent by its ID. + + Args: + agent_id (int): The ID of the agent to retrive. + + Returns: + SocialAgent: The requested social agent. + """ return self.agent_mappings[agent_id] def get_agents( self, agent_ids: list[int] = None) -> list[tuple[int, SocialAgent]]: + r""" + Get specific agents by their IDs. + + Args: + agent_ids (list[int], optional): List of agent IDs to retrieve. + If None, returns all agents. + + Returns: + list[tuple[int, SocialAgent]]: List of (agent_id, agent) tuples. + """ if agent_ids: return [(agent_id, self.get_agent(agent_id)) for agent_id in agent_ids] @@ -240,24 +361,45 @@ def get_agents( for agent_id in self.graph.get_all_nodes()] def get_edges(self) -> list[tuple[int, int]]: + r""" + Get all edges in the graph. + + Returns: + list[tuple[int, int]]: List of (source, destination) + agent ID pairs. 
+ """ if self.backend == "igraph": return [(edge.source, edge.target) for edge in self.graph.es] else: return self.graph.get_all_edges() def get_num_nodes(self) -> int: + r""" + Get the number of nodes in the graph. + + Returns: + int: Number of nodes. + """ if self.backend == "igraph": return self.graph.vcount() else: return self.graph.get_number_of_nodes() def get_num_edges(self) -> int: + r""" + Get the number of edges in the graph. + + Returns: + int: Number of edges. + """ if self.backend == "igraph": return self.graph.ecount() else: return self.graph.get_number_of_edges() def close(self) -> None: + r"""Close the graph connection. + """ if self.backend == "neo4j": self.graph.close() @@ -272,6 +414,20 @@ def visualize( width: int = 1000, height: int = 1000, ): + r""" + Visualize the graph and save it to a file only when + the backend is igraph. + + Args: + path (str): Path to save the visualization. + vertex_size (int, optional): Size of vertices. + edge_arrow_size (float, optional): Size of edge arrows. + with_labels (bool, optional): Whether to show vertex labels. + vertex_color (str, optional): Color of vertices. + vertex_frame_width (int, optional): Width of vertex frames. + width (int, optional): Width of the image. + height (int, optional): Height of the image. + """ if self.backend == "neo4j": raise ValueError("Neo4j backend does not support visualization.") layout = self.graph.layout("auto") diff --git a/oasis/social_agent/agents_generator.py b/oasis/social_agent/agents_generator.py index 65435906..a3f7bee2 100644 --- a/oasis/social_agent/agents_generator.py +++ b/oasis/social_agent/agents_generator.py @@ -349,6 +349,20 @@ async def generate_controllable_agents( channel: Channel, control_user_num: int, ) -> tuple[AgentGraph, dict]: + r""" + Create a specified number of controllable agents where each agent's profile + details are entered manually through user input. All agents automatically + follow each other in the social graph. 
+ + Args: + channel (Channel): The social platform channel to connect agents to. + control_user_num (int): Number of controllable agents to create. + + Returns: + tuple[AgentGraph, dict]: A tuple containing the latest agent graph + with all created agents and the mapping dictionary of agent IDs to + platform user IDs. + """ agent_graph = AgentGraph() agent_user_id_mapping = {} for i in range(control_user_num): @@ -391,6 +405,21 @@ async def gen_control_agents_with_data( control_user_num: int, models: list[BaseModelBackend] | None = None, ) -> tuple[AgentGraph, dict]: + r"""Create a specified number of controllable agents with + predefined profile data. All agents won't automatically + follow each other in the social graph. + + Args: + channel (Channel): The social platform channel to connect agents to. + control_user_num (int): Number of controllable agents to create. + models (list[BaseModelBackend] | None): Optional LLM backends + for agents. + + Returns: + tuple[AgentGraph, dict]: A tuple containing the latest agent + graph with all created agents and the mapping dictionary of + agent IDs to platform user IDs. + """ agent_graph = AgentGraph() agent_user_id_mapping = {} for i in range(control_user_num): @@ -439,6 +468,26 @@ async def generate_reddit_agents( ModelManager]] = None, available_actions: list[ActionType] = None, ) -> AgentGraph: + r"""Create social agents with detailed profiles loaded from a JSON file and + configure their initial relationships with other agents. + + Args: + agent_info_path (str): Path to JSON file containing agent profiles. + channel (Channel): Social platform channel to connect agents to. + agent_graph (AgentGraph | None): Social connection graph representing + the agent's network and relationships. + agent_user_id_mapping (dict[int, int] | None): Existing mapping for + user and agent. + follow_post_agent (bool): Whether to make agents follow post agent. + mute_post_agent (bool): Whether to make agents mute post agent. 
+ model (Optional[Union[BaseModelBackend, List[BaseModelBackend], + ModelManager]]): Configuration for the backend LLM(s). + available_actions (list[ActionType]): Subset of allowed action types. + If None, all action types are not permitted. + + Returns: + AgentGraph: The latest agent graph with all created agents. + """ if agent_user_id_mapping is None: agent_user_id_mapping = {} if agent_graph is None: @@ -538,6 +587,16 @@ def connect_platform_channel( channel: Channel, agent_graph: AgentGraph | None = None, ) -> AgentGraph: + r"""Connect all agents in a graph to a specified platform channel. + + Args: + channel (Channel): Social platform channel to connect agents to. + agent_graph (AgentGraph | None): Graph containing all agents to + connect. + + Returns: + AgentGraph: The latest agent graph with updated channel connections. + """ for _, agent in agent_graph.get_agents(): agent.channel = channel agent.env.action.channel = channel @@ -548,6 +607,16 @@ async def generate_custom_agents( channel: Channel, agent_graph: AgentGraph | None = None, ) -> AgentGraph: + r"""Register existing agents to a graph with the specific platform. + + Args: + channel (Channel): The social platform channel to register with. + agent_graph (AgentGraph | None): Graph for all agents needed to + register. + + Returns: + AgentGraph: The updated graph with currently registered agents. + """ if agent_graph is None: agent_graph = AgentGraph() @@ -570,6 +639,19 @@ async def generate_reddit_agent_graph( ModelManager]] = None, available_actions: list[ActionType] = None, ) -> AgentGraph: + r"""Create a Reddit-style agent graph from profile data without + platform registration. + + Args: + profile_path (str): Path to JSON file containing agent profiles. + model (Optional[Union[BaseModelBackend, List[BaseModelBackend], + ModelManager]]): Configuration for the backend LLM(s). + available_actions (list[ActionType]): Subset of allowed action types. + If None, all action types are not permitted. 
+ + Returns: + AgentGraph: The latest graph with set agents. + """ agent_graph = AgentGraph() with open(profile_path, "r") as file: agent_info = json.load(file) @@ -617,6 +699,19 @@ async def generate_twitter_agent_graph( ModelManager]] = None, available_actions: list[ActionType] = None, ) -> AgentGraph: + r"""Create a Twitter-style agent graph from CSV profile data without + platform registeration. + + Args: + profile_path (str): Path to CSV file containing agent profiles. + model (Optional[Union[BaseModelBackend, List[BaseModelBackend], + ModelManager]]): Configuration for the backend LLM(s). + available_actions (list[ActionType]): Subset of allowed action types. + If None, all action types are not permitted. + + Returns: + AgentGraph: The latest graph with set agents. + """ agent_info = pd.read_csv(profile_path) agent_graph = AgentGraph() diff --git a/oasis/social_platform/channel.py b/oasis/social_platform/channel.py index 7d29f053..972b411d 100644 --- a/oasis/social_platform/channel.py +++ b/oasis/social_platform/channel.py @@ -16,50 +16,133 @@ class AsyncSafeDict: + r""" + A class provides a dictionary interface protected + by an asyncio Lock to ensure safe concurrent access + in asynchronous environments. + """ def __init__(self): + r""" + Initialize the AsyncSafeDict with an empty dictionary + and a new lock. + """ self.dict = {} self.lock = asyncio.Lock() async def put(self, key, value): + r""" + Safely insert or update a key-value pair in the dictionary. + + Args: + key: The key to insert/update. + value: The value to associate with the key. + """ async with self.lock: self.dict[key] = value async def get(self, key, default=None): + r""" + Safely retrieve a value from the dictionary. + + Args: + key: The key to retrieve. + default: Value to return if key is not found. + + Returns: + The value associated with the key, + or default if key doesn't exist. 
+ """ async with self.lock: return self.dict.get(key, default) async def pop(self, key, default=None): + r""" + Safely remove a value from the dictionary. + + Args: + key: The key to remove. + default: Value to remove if key is not found. + + Returns: + The value associated with the key, + or default if key doesn't exist + """ async with self.lock: return self.dict.pop(key, default) async def keys(self): + r""" + Safely retrieve all keys from the dictionary. + + Returns: + List: A list of all keys currently in the dictionary. + """ async with self.lock: return list(self.dict.keys()) class Channel: + r""" + A class provides asynchronous communication approaches + for message passing. + """ def __init__(self): + r""" + Initialize the Channel with a received queue and sent dictionary. + """ self.receive_queue = asyncio.Queue() # Used to store received messages # Using an asynchronous safe dictionary to store messages to be sent self.send_dict = AsyncSafeDict() async def receive_from(self): + r""" + Receive a message from the channel's received queue. + + Returns: + tuple: Message from the received queue. + """ message = await self.receive_queue.get() return message async def send_to(self, message): + r""" + Send a message to the channel's send dictionary. + + Args: + message: The message to send. + """ # message_id is the first element of the message message_id = message[0] await self.send_dict.put(message_id, message) async def write_to_receive_queue(self, action_info): + r""" + Write a new message to the message receiving queue + with a generated UUID. + + Args: + action_info: The message content to enqueue. + + Returns: + str: The generated message UUID. + """ message_id = str(uuid.uuid4()) await self.receive_queue.put((message_id, action_info)) return message_id async def read_from_send_queue(self, message_id): + r""" + Continuously check for and retrieve a specific message + from the message sent dictionary. 
+ + Args: + message_id: The UUID of the message to retrieve. + + Returns: + str: The message content when found. + """ while True: if message_id in await self.send_dict.keys(): # Attempting to retrieve the message diff --git a/oasis/social_platform/config/neo4j.py b/oasis/social_platform/config/neo4j.py index af5aacd5..999b25c7 100644 --- a/oasis/social_platform/config/neo4j.py +++ b/oasis/social_platform/config/neo4j.py @@ -16,9 +16,24 @@ @dataclass class Neo4jConfig: + r""" + A configuration class for Neo4j database connection parameters. + + Args: + uri (str | None): The connection URI for the Neo4j database. + username (str | None): The username for database authentication. + password (str | None): The password for database authentication. + """ + uri: str | None = None username: str | None = None password: str | None = None def is_valid(self) -> bool: + r""" + Check if the configuration contains all required parameters. + + Returns: + bool: Whether the configuration is validated. + """ return all([self.uri, self.username, self.password]) diff --git a/oasis/social_platform/config/user.py b/oasis/social_platform/config/user.py index a36b4253..544a218d 100644 --- a/oasis/social_platform/config/user.py +++ b/oasis/social_platform/config/user.py @@ -21,6 +21,20 @@ @dataclass class UserInfo: + r"""A class stores user profile and provides methods to + generate system messages tailored for different platforms. + + Args: + user_name (str | None): Username for the account. + name (str | None): The name of the user. + description (str | None): Brief description of the user. + profile (dict[str, Any] | None): Detailed profile + information dictionary. + recsys_type (str): Type of social media platform. + Defaults to 'twitter'. + is_controllable (bool): Whether the user behavior + can be controlled. 
+ """ user_name: str | None = None name: str | None = None description: str | None = None @@ -29,6 +43,17 @@ class UserInfo: is_controllable: bool = False def to_custom_system_message(self, user_info_template: TextPrompt) -> str: + r"""Generate a custom system message by formatting + the given template with user profile. + + Args: + user_info_template (TextPrompt): A TextPrompt containing + the template string and required keys. + + Returns: + str: The formatted system message string. + """ + required_keys = user_info_template.key_words info_keys = set(self.profile.keys()) missing = required_keys - info_keys @@ -42,12 +67,25 @@ def to_custom_system_message(self, user_info_template: TextPrompt) -> str: return user_info_template.format(**self.profile) def to_system_message(self) -> str: + r"""Generate a system message based on the default system type. + + Returns: + str: The generated system message, either for Twitter + or Reddit based on recsys_type. + """ + if self.recsys_type != "reddit": return self.to_twitter_system_message() else: return self.to_reddit_system_message() def to_twitter_system_message(self) -> str: + r"""Generate a system message for Twitter interactions. + + Returns: + str: The formatted Twitter system message + with user-specific information. + """ name_string = "" description_string = "" if self.name is not None: @@ -77,6 +115,12 @@ def to_twitter_system_message(self) -> str: return system_content def to_reddit_system_message(self) -> str: + r"""Generate a system message for Reddit interactions. + + Returns: + str: The formatted Reddit system message + with user-specific information. 
+ """ name_string = "" description_string = "" if self.name is not None: diff --git a/oasis/social_platform/database.py b/oasis/social_platform/database.py index f4e91dfe..3653aaa3 100644 --- a/oasis/social_platform/database.py +++ b/oasis/social_platform/database.py @@ -19,7 +19,7 @@ from typing import Any, Dict, List SCHEMA_DIR = "social_platform/schema" -DB_DIR = "db" +DB_DIR = "data" DB_NAME = "social_media.db" USER_SCHEMA_SQL = "user.sql" @@ -28,6 +28,7 @@ MUTE_SCHEMA_SQL = "mute.sql" LIKE_SCHEMA_SQL = "like.sql" DISLIKE_SCHEMA_SQL = "dislike.sql" +REPORT_SCHEAM_SQL = "report.sql" TRACE_SCHEMA_SQL = "trace.sql" REC_SCHEMA_SQL = "rec.sql" COMMENT_SCHEMA_SQL = "comment.sql" @@ -45,6 +46,7 @@ "mute", "like", "dislike", + "report", "trace", "rec", "comment.sql", @@ -58,6 +60,17 @@ def get_db_path() -> str: + r"""Get the absolute path to the SQLite database file. + + Returns: + str: Absolute path to the SQLite database file. + """ + # First check if the database path is set in environment variables + env_db_path = os.environ.get("OASIS_DB_PATH") + if env_db_path: + return env_db_path + + # If no environment variable is set, use the original default path curr_file_path = osp.abspath(__file__) parent_dir = osp.dirname(osp.dirname(curr_file_path)) db_dir = osp.join(parent_dir, DB_DIR) @@ -67,6 +80,11 @@ def get_db_path() -> str: def get_schema_dir_path() -> str: + r"""Get the absolute path to the schema directory. + + Returns: + str: Absolute path to the schema directory. + """ curr_file_path = osp.abspath(__file__) parent_dir = osp.dirname(osp.dirname(curr_file_path)) schema_dir = osp.join(parent_dir, SCHEMA_DIR) @@ -76,6 +94,12 @@ def get_schema_dir_path() -> str: def create_db(db_path: str | None = None): r"""Create the database if it does not exist. A :obj:`twitter.db` file will be automatically created in the :obj:`data` directory. + + Args: + db_path (str | None): Optional path to the database file. 
+ + Returns: + tuple: A (connection, cursor) tuple for the created database. """ schema_dir = get_schema_dir_path() if db_path is None: @@ -123,6 +147,12 @@ def create_db(db_path: str | None = None): dislike_sql_script = sql_file.read() cursor.executescript(dislike_sql_script) + # Read and execute the report table SQL script: + report_sql_path = osp.join(schema_dir, REPORT_SCHEAM_SQL) + with open(report_sql_path, "r") as sql_file: + report_sql_script = sql_file.read() + cursor.executescript(report_sql_script) + # Read and execute the trace table SQL script: trace_sql_path = osp.join(schema_dir, TRACE_SCHEMA_SQL) with open(trace_sql_path, "r") as sql_file: @@ -188,6 +218,9 @@ def create_db(db_path: str | None = None): def print_db_tables_summary(): + r""" + Print a summary of all tables in the database. + """ # Connect to the SQLite database db_path = get_db_path() conn = sqlite3.connect(db_path) @@ -234,6 +267,16 @@ def print_db_tables_summary(): def fetch_table_from_db(cursor: sqlite3.Cursor, table_name: str) -> List[Dict[str, Any]]: + r"""Fetch all rows from a given table as a list of dictionaries. + + Args: + cursor (sqlite3.Cursor): Active SQLite cursor object. + table_name (str): Name of the table to fetch. + + Returns: + List[Dict[str, Any]]: List of rows, where each row is represented + as a dictionary mapping column names to values. + """ cursor.execute(f"SELECT * FROM {table_name}") columns = [description[0] for description in cursor.description] data_dicts = [dict(zip(columns, row)) for row in cursor.fetchall()] @@ -241,6 +284,15 @@ def fetch_table_from_db(cursor: sqlite3.Cursor, def fetch_rec_table_as_matrix(cursor: sqlite3.Cursor) -> List[List[int]]: + r"""Fetch the recommended table which contains contains + (user_id, post_id) pairs and group the posts by user. + + Args: + cursor (sqlite3.Cursor): Active SQLite cursor object. + + Returns: + List[List[int]]: Matrix of post_ids grouped by user_id. 
+ """ # First, query all user_ids from the user table, assuming they start from # 1 and are consecutive cursor.execute("SELECT user_id FROM user ORDER BY user_id") @@ -263,6 +315,15 @@ def fetch_rec_table_as_matrix(cursor: sqlite3.Cursor) -> List[List[int]]: def insert_matrix_into_rec_table(cursor: sqlite3.Cursor, matrix: List[List[int]]) -> None: + r"""Insert a matrix of user-post relationships into the reccommended table. + + Args: + cursor (sqlite3.Cursor): Active SQLite cursor object. + matrix (List[List[int]]): Matrix of post_ids grouped by user_id. + + Returns: + None. + """ # Iterate through the matrix, skipping the placeholder at index 0 for user_id, post_ids in enumerate(matrix, start=1): # Adjusted to start counting from 1 diff --git a/oasis/social_platform/platform.py b/oasis/social_platform/platform.py index fcf71369..c7543cf3 100644 --- a/oasis/social_platform/platform.py +++ b/oasis/social_platform/platform.py @@ -51,7 +51,7 @@ class Platform: - r"""Platform.""" + r"""Platform class for social environment.""" def __init__( self, @@ -67,6 +67,31 @@ def __init__( following_post_count=3, use_openai_embedding: bool = False, ): + r""" + Initialize a simulated social media platform environment. + + Args: + db_path (str): Path to the SQLite database file. + channel (Any): Communication channel for message passing. + Defaults to Channel() if not provided. + sandbox_clock (Clock): Custom clock to simulate time flow. + Defaults to Clock(60). + start_time (datetime): Simulation start time. + Defaults to datetime.now(). + show_score (bool): Whether to show social score. Defaults to False. + allow_self_rating (bool): Whether allow user to rate their + own posts/comments. Defaults to True. + recsys_type (str | RecsysType): Recommendation system type. + Defaults to "reddit". + refresh_rec_post_count (int): Number of recommended posts per + refresh. Defaults to 1. + max_rec_post_len (int): Maximum number of recommended posts. + Defaults to 2. 
+ following_post_count (int): Number of posts shown from followed + users per refresh. Defaults to 3. + use_openai_embedding (bool): Whether to use OpenAI embeddings + for text representation. Defaults to False. + """ self.db_path = db_path self.recsys_type = recsys_type # import pdb; pdb.set_trace() @@ -112,6 +137,9 @@ def __init__( self.trend_num_days = 7 self.trend_top_k = 1 + # Report threshold setting + self.report_threshold = 2 + self.pl_utils = PlatformUtils( self.db, self.db_cursor, @@ -119,9 +147,14 @@ def __init__( self.sandbox_clock, self.show_score, self.recsys_type, + self.report_threshold, ) async def running(self): + r""" + Continuously listens for agent actions from the communication + channel and execute each action with appropriate parameters. + """ while True: message_id, data = await self.channel.receive_from() @@ -169,9 +202,20 @@ async def running(self): raise ValueError(f"Action {action} is not supported") def run(self): + r""" + Execute the listening loop. + """ asyncio.run(self.running()) async def sign_up(self, agent_id, user_message): + r""" + Register a new user in the social platform. + + Args: + agent_id (int): The agent ID to be registered. + user_message (tuple): Tuple containing user info + (user_name: str, name: str, bio: str). 
+ """ user_name, name, bio = user_message if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( @@ -180,8 +224,8 @@ async def sign_up(self, agent_id, user_message): current_time = self.sandbox_clock.get_time_step() try: user_insert_query = ( - "INSERT INTO user (user_id, agent_id, user_name, name, bio, " - "created_at, num_followings, num_followers) VALUES " + "INSERT INTO user (user_id, agent_id, user_name, name, " + "bio, created_at, num_followings, num_followers) VALUES " "(?, ?, ?, ?, ?, ?, ?, ?)") self.pl_utils._execute_db_command( user_insert_query, @@ -202,6 +246,13 @@ async def sign_up(self, agent_id, user_message): return {"success": False, "error": str(e)} async def sign_up_product(self, product_id: int, product_name: str): + r""" + Register a new product in the social platform. + + Args: + product_id (int): The product ID to be registered. + product_name (str): Name of the product to be registered. + """ # Note: do not sign up the product with the same product name try: product_insert_query = ( @@ -214,6 +265,14 @@ async def sign_up_product(self, product_id: int, product_name: str): return {"success": False, "error": str(e)} async def purchase_product(self, agent_id, purchase_message): + r""" + Handle a purchase action by a user. + + Args: + agent_id (int): The agent ID to take a purchase action. + purchase_message (tuple): Tuple containing purchase info + (product_name: str, purchase_num: int). + """ product_name, purchase_num = purchase_message if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( @@ -252,6 +311,13 @@ async def purchase_product(self, agent_id, purchase_message): # return {"success": False, "error": str(e)} async def refresh(self, agent_id: int): + r""" + Refresh the information for a given user by retrieving + recommended or following posts. + + Args: + agent_id (int): Agent identifier. 
+ """ # Retrieve posts for a specific id from the rec table if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( @@ -283,7 +349,7 @@ async def refresh(self, agent_id: int): "post.created_at, post.num_likes FROM post " "JOIN follow ON post.user_id = follow.followee_id " "WHERE follow.follower_id = ? " - "ORDER BY post.num_likes DESC " + "ORDER BY post.num_likes DESC " "LIMIT ?") self.pl_utils._execute_db_command( query_following_post, @@ -322,6 +388,9 @@ async def refresh(self, agent_id: int): return {"success": False, "error": str(e)} async def update_rec_table(self): + r""" + Update the recommendation table with new recommended posts. + """ # Recsys(trace/user/post table), refresh rec table twitter_log.info("Starting to refresh recommendation system cache...") user_table = fetch_table_from_db(self.db_cursor, "user") @@ -394,6 +463,13 @@ async def update_rec_table(self): ) async def create_post(self, agent_id: int, content: str): + r""" + Insert a new record into the post table by a specific user. + + Args: + agent_id (int): Agent identifier. + content (str): The content of the new post. + """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( datetime.now(), self.start_time) @@ -424,6 +500,14 @@ async def create_post(self, agent_id: int, content: str): return {"success": False, "error": str(e)} async def repost(self, agent_id: int, post_id: int): + r""" + Repost an existing post by a user and then update the + information in the table. + + Args: + agent_id (int): The agent ID to take repost action. + post_id (int): The post ID to be reposted. 
+ """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( datetime.now(), self.start_time) @@ -498,6 +582,16 @@ async def repost(self, agent_id: int, post_id: int): return {"success": False, "error": str(e)} async def quote_post(self, agent_id: int, quote_message: tuple): + r""" + Quote an existing post with the given user and update the + number of shares of the root post. + + Args: + agent_id (int): The agent ID to take quote action. + quote_message (tuple): A tuple containing + - post_id (int): The post ID to be quoted. + - quote_content (str): The content of the quoted message. + """ post_id, quote_content = quote_message if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( @@ -557,6 +651,15 @@ async def quote_post(self, agent_id: int, quote_message: tuple): return {"success": False, "error": str(e)} async def like_post(self, agent_id: int, post_id: int): + r""" + Like a post by a given user (duplicate likes by the + same user is not allowed) and judge whether the post + is the root one beforing updateing the table. + + Args: + agent_id (int): The agent ID to take like action. + post_id (int): The post ID to be liked. + """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( datetime.now(), self.start_time) @@ -612,6 +715,14 @@ async def like_post(self, agent_id: int, post_id: int): return {"success": False, "error": str(e)} async def unlike_post(self, agent_id: int, post_id: int): + r""" + Remove a like from a post for a specific user + based on the root post and update the table. + + Args: + agent_id (int): The agent ID to take unlike action. + post_id (int): The post ID to be unliked. 
+ """ try: post_type_result = self.pl_utils._get_post_type(post_id) if post_type_result['type'] == 'repost': @@ -661,6 +772,15 @@ async def unlike_post(self, agent_id: int, post_id: int): return {"success": False, "error": str(e)} async def dislike_post(self, agent_id: int, post_id: int): + r""" + Disike a post by a given user (duplicate dislikes by the + same user is not allowed) and judge whether the post + is the root one beforing updateing the table. + + Args: + agent_id (int): The agent ID to take dislike action. + post_id (int): The post ID to be dislike. + """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( datetime.now(), self.start_time) @@ -716,6 +836,15 @@ async def dislike_post(self, agent_id: int, post_id: int): return {"success": False, "error": str(e)} async def undo_dislike_post(self, agent_id: int, post_id: int): + r""" + Remove a dislike from a post by a specific user and judge + whether the post is the root one beforing updateing the table. + + Args: + agent_id (int): The agent ID to undo a dislike action. + post_id (int): Post identifier for the one dislike is + being removed. + """ try: post_type_result = self.pl_utils._get_post_type(post_id) if post_type_result['type'] == 'repost': @@ -767,6 +896,13 @@ async def undo_dislike_post(self, agent_id: int, post_id: int): return {"success": False, "error": str(e)} async def search_posts(self, agent_id: int, query: str): + r""" + Retrieve posts based on agent ID and post information. + + Args: + agent_id (int): Agent identifier. + query (str): The search term to look for in posts. + """ try: user_id = agent_id # Update the SQL query to search by content, post_id, and user_id @@ -804,6 +940,13 @@ async def search_posts(self, agent_id: int, query: str): return {"success": False, "error": str(e)} async def search_user(self, agent_id: int, query: str): + r""" + Retrieve users based on agent ID and user information. + + Args: + agent_id (int): Agent identifier. 
+ query (str): The search term to look for in users. + """ try: user_id = agent_id sql_query = ( @@ -853,6 +996,14 @@ async def search_user(self, agent_id: int, query: str): return {"success": False, "error": str(e)} async def follow(self, agent_id: int, followee_id: int): + r""" + Follow another user while prevents duplicate following + from the same user and update the follower count. + + Args: + agent_id (int): The agent ID to take follow action. + followee_id (int): The followee ID to be followed. + """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( datetime.now(), self.start_time) @@ -910,6 +1061,14 @@ async def follow(self, agent_id: int, followee_id: int): return {"success": False, "error": str(e)} async def unfollow(self, agent_id: int, followee_id: int): + r""" + Unfollow a previously followed user and update the follower + count and record. + + Args: + agent_id (int): The agent ID to take unfollow action. + followee_id (int): The followee ID to be unfollowed. + """ try: user_id = agent_id # Check for the existence of a follow record and get its ID @@ -960,6 +1119,14 @@ async def unfollow(self, agent_id: int, followee_id: int): return {"success": False, "error": str(e)} async def mute(self, agent_id: int, mutee_id: int): + r""" + Mute another user while preventing duplicate mute records + by the same user and update the mute record. + + Args: + agent_id (int): The agent ID to take the mute action. + mutee_id (int): The mutee ID to be muted. + """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( datetime.now(), self.start_time) @@ -997,6 +1164,13 @@ async def mute(self, agent_id: int, mutee_id: int): return {"success": False, "error": str(e)} async def unmute(self, agent_id: int, mutee_id: int): + r""" + Unmute a previously muted user and update the mute record. + + Args: + agent_id (int): The agent ID to take unmute action. + mutee_id (int): The mutee ID to be unmuted. 
+ """ try: user_id = agent_id # Check for the specified mute record and get mute_id @@ -1024,8 +1198,12 @@ async def unmute(self, agent_id: int, mutee_id: int): return {"success": False, "error": str(e)} async def trend(self, agent_id: int): - """ - Get the top K trending posts in the last num_days days. + r""" + Get the top K trending posts in the last num_days days based on + the highest number of likes. + + Args: + agent_id (int): Agent identifier. """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( @@ -1073,6 +1251,16 @@ async def trend(self, agent_id: int): return {"success": False, "error": str(e)} async def create_comment(self, agent_id: int, comment_message: tuple): + r""" + Create a new comment on a post by the given user and judge + whether the post is the root one befor incerting the comment. + + Args: + agent_id (int): The agent ID which created the comment. + comment_message (tuple): A tuple contains: + - post_id (int): The post ID to be commented on. + - content (str): The text content of the comment. + """ post_id, content = comment_message if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( @@ -1107,6 +1295,14 @@ async def create_comment(self, agent_id: int, comment_message: tuple): return {"success": False, "error": str(e)} async def like_comment(self, agent_id: int, comment_id: int): + r""" + Like a comment below the post by a given user and update the + comment's like count and record. + + Args: + agent_id (int): The agent ID to like a comment. + comment_id (int): The comment ID to be liked. 
+ """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( datetime.now(), self.start_time) @@ -1165,6 +1361,14 @@ async def like_comment(self, agent_id: int, comment_id: int): return {"success": False, "error": str(e)} async def unlike_comment(self, agent_id: int, comment_id: int): + r""" + Remove a like from a comment by a given user and update the + comment's like count and record. + + Args: + agent_id (int): The agent ID to unlike a comment. + comment_id (int): The comment ID to be unliked. + """ try: user_id = agent_id @@ -1215,6 +1419,14 @@ async def unlike_comment(self, agent_id: int, comment_id: int): return {"success": False, "error": str(e)} async def dislike_comment(self, agent_id: int, comment_id: int): + r""" + Dislike a comment by a given user while preventing the duplicate + dislike and update the dislikes count and record. + + Args: + agent_id (int): The agent ID to dislike a comment. + comment_id (int): The comment ID to be disliked. + """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( datetime.now(), self.start_time) @@ -1274,6 +1486,14 @@ async def dislike_comment(self, agent_id: int, comment_id: int): return {"success": False, "error": str(e)} async def undo_dislike_comment(self, agent_id: int, comment_id: int): + r""" + Undo a dislike on a comment by a given user and upate the dislikes + count and record. + + Args: + agent_id (int): The agent ID to undo a dislike for the comment. + comment_id (int): The comment ID to be cancelled disliked. 
+ """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( datetime.now(), self.start_time) @@ -1326,6 +1546,13 @@ async def undo_dislike_comment(self, agent_id: int, comment_id: int): return {"success": False, "error": str(e)} async def do_nothing(self, agent_id: int): + r""" + This function does not change any database state, but only + logs the None action nto the trace table with the current time. + + Args: + agent_id (int): The agent ID to take no action. + """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( datetime.now(), self.start_time) @@ -1342,7 +1569,7 @@ async def do_nothing(self, agent_id: int): return {"success": False, "error": str(e)} async def interview(self, agent_id: int, interview_data): - """Interview an agent with the given prompt and record the response. + r"""Interview an agent with the given prompt and record the response. Args: agent_id (int): The ID of the agent being interviewed. @@ -1387,7 +1614,71 @@ async def interview(self, agent_id: int, interview_data): except Exception as e: return {"success": False, "error": str(e)} + async def report_post(self, agent_id: int, report_message: tuple): + post_id, report_reason = report_message + if self.recsys_type == RecsysType.REDDIT: + current_time = self.sandbox_clock.time_transfer( + datetime.now(), self.start_time) + else: + current_time = self.sandbox_clock.get_time_step() + try: + user_id = agent_id + post_type_result = self.pl_utils._get_post_type(post_id) + + # Check if a report record already exists + check_report_query = ( + "SELECT * FROM report WHERE user_id = ? AND post_id = ?") + self.pl_utils._execute_db_command(check_report_query, + (user_id, post_id)) + if self.db_cursor.fetchone(): + return { + "success": False, + "error": "Report record already exists." 
+ } + + if not post_type_result: + return {"success": False, "error": "Post not found."} + + # Update the number of reports in the post table + update_reports_query = ( + "UPDATE post SET num_reports = num_reports + 1 WHERE " + "post_id = ?") + self.pl_utils._execute_db_command(update_reports_query, + (post_id, ), + commit=True) + + # Add a report in the report table + report_insert_query = ( + "INSERT INTO report (post_id, user_id, report_reason, " + "created_at) VALUES (?, ?, ?, ?)") + self.pl_utils._execute_db_command( + report_insert_query, + (post_id, user_id, report_reason, current_time), + commit=True) + + # Get the ID of the newly inserted report record + report_id = self.db_cursor.lastrowid + + # Record the action in the trace table + action_info = {"post_id": post_id, "report_id": report_id} + self.pl_utils._record_trace(user_id, ActionType.REPORT_POST.value, + action_info, current_time) + + return {"success": True, "report_id": report_id} + except Exception as e: + return {"success": False, "error": str(e)} + async def send_to_group(self, agent_id: int, message: tuple): + r""" + Checks if the user is a member of the group before sending + the message to the group and update the information. + + Args: + agent_id (int): The agent ID to receive group message. + message (tuple): A tuple contains + - group_id (int): Group identifier. + - content (str): Message text. + """ group_id, content = message if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( @@ -1396,7 +1687,6 @@ async def send_to_group(self, agent_id: int, message: tuple): current_time = self.sandbox_clock.get_time_step() try: user_id = agent_id - # check if user is a member of the group check_query = ("SELECT * FROM group_members WHERE group_id = ? 
" "AND agent_id = ?") @@ -1438,6 +1728,13 @@ async def send_to_group(self, agent_id: int, message: tuple): return {"success": False, "error": str(e)} async def create_group(self, agent_id: int, group_name: str): + r""" + Create a new group chat and add the creator as a member. + + Args: + agent_id (int): The agent ID to create the group. + group_name (str): The name of the new group. + """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( datetime.now(), self.start_time) @@ -1472,6 +1769,14 @@ async def create_group(self, agent_id: int, group_name: str): return {"success": False, "error": str(e)} async def join_group(self, agent_id: int, group_id: int): + r""" + Join an existing group chat while preventing duplicate + join by the same user and update the membership record. + + Args: + agent_id (int): The agent ID to join the group. + group_id (int): Group identifier of the target one. + """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( datetime.now(), self.start_time) @@ -1516,6 +1821,13 @@ async def join_group(self, agent_id: int, group_id: int): return {"success": False, "error": str(e)} async def leave_group(self, agent_id: int, group_id: int): + r""" + Remove the given user from the group. + + Args: + agent_id (int): The agent ID to be removed. + group_id (int): Group identifier of the group user left. + """ try: user_id = agent_id @@ -1545,6 +1857,12 @@ async def leave_group(self, agent_id: int, group_id: int): return {"success": False, "error": str(e)} async def listen_from_group(self, agent_id: int): + r""" + Retrieve all messages from the groups that a user is a member of. + + Args: + agent_id (int): Agent identifier. 
+ """ try: # get all groups Dict[group_id, group_name] query = """ SELECT * FROM chat_group """ diff --git a/oasis/social_platform/platform_utils.py b/oasis/social_platform/platform_utils.py index 6e053ba8..190660d9 100644 --- a/oasis/social_platform/platform_utils.py +++ b/oasis/social_platform/platform_utils.py @@ -18,18 +18,48 @@ class PlatformUtils: + r""" + Utility class for handling platform-related database operations, + post/comment processing, and action trace recording in a social + platform environment. + """ - def __init__(self, db, db_cursor, start_time, sandbox_clock, show_score, - recsys_type): + def __init__(self, + db, + db_cursor, + start_time, + sandbox_clock, + show_score, + recsys_type, + report_threshold=1): + r''' + Args: + db (sqlite3.Connection): Database connection object. + db_cursor (sqlite3.Cursor): Cursor object for executing queries. + start_time (datetime): Simulation start time. + sandbox_clock (Any): Object providing time control in the sandbox. + show_score (bool): Whether to display score (likes - dislikes) + instead of raw like/dislike counts. + recsys_type (RecsysType): Recommender system type. + report_threshold (int): The number of reports to trigger a + warning message. + ''' self.db = db self.db_cursor = db_cursor self.start_time = start_time self.sandbox_clock = sandbox_clock self.show_score = show_score self.recsys_type = recsys_type + self.report_threshold = report_threshold @staticmethod def _not_signup_error_message(agent_id): + r""" + Generate an error message when an agent has not signed up. + + Args: + agent_id (int): The ID of Agent which is not signed. + """ return { "success": False, @@ -38,18 +68,41 @@ def _not_signup_error_message(agent_id): } def _execute_db_command(self, command, args=(), commit=False): + r""" + Execute a single SQL command. + + Args: + command (str): SQL command string. + args (tuple): Arguments for the SQL command. + commit (bool): Whether to commit after execution. 
+ """ self.db_cursor.execute(command, args) if commit: self.db.commit() return self.db_cursor def _execute_many_db_command(self, command, args_list, commit=False): + r""" + Execute a batch of SQL commands. + + Args: + command (str): SQL command string. + args_list (list): List of arguments for the SQL commands. + commit (bool): Whether to commit after execution. + """ self.db_cursor.executemany(command, args_list) if commit: self.db.commit() return self.db_cursor def _check_agent_userid(self, agent_id): + r""" + Retrieve the user_id associated with a given agent_id. + + Args: + agent_id (int): Agent identifier. + """ + try: user_query = "SELECT user_id FROM user WHERE agent_id = ?" results = self._execute_db_command(user_query, (agent_id, )) @@ -66,6 +119,12 @@ def _check_agent_userid(self, agent_id): return None def _add_comments_to_posts(self, posts_results): + r""" + Process posts query results and attach related comments. + + Args: + posts_results (list[tuple]): List of rows from the post table. + """ # Initialize the returned posts list posts = [] for row in posts_results: @@ -84,11 +143,11 @@ def _add_comments_to_posts(self, posts_results): post_id = post_type_result["root_post_id"] self.db_cursor.execute( "SELECT content, quote_content, created_at, num_likes, " - "num_dislikes, num_shares FROM post WHERE post_id = ?", - (post_id, )) + "num_dislikes, num_shares, num_reports FROM post " + "WHERE post_id = ?", (post_id, )) original_post_result = self.db_cursor.fetchone() (content, quote_content, created_at, num_likes, num_dislikes, - num_shares) = original_post_result + num_shares, num_reports) = original_post_result post_content = ( f"User {user_id} reposted a post from User " f"{original_user_id}. Repost content: {content}. 
") @@ -104,6 +163,11 @@ def _add_comments_to_posts(self, posts_results): elif post_type_result["type"] == "common": post_content = content + # Get num_reports for common posts + self.db_cursor.execute( + "SELECT num_reports FROM post WHERE post_id = ?", + (post_id, )) + num_reports = self.db_cursor.fetchone()[0] # For each post, query its corresponding comments self.db_cursor.execute( @@ -141,6 +205,12 @@ def _add_comments_to_posts(self, posts_results): num_dislikes, ) in comments_results] + # Add warning message if the post has been reported + if num_reports >= self.report_threshold: + warning_message = ("[Warning: This post has been reported" + f" {num_reports} times]") + post_content = f"{warning_message}\n{post_content}" + # Add post information and corresponding comments to the posts list posts.append({ "post_id": @@ -160,6 +230,8 @@ def _add_comments_to_posts(self, posts_results): }), "num_shares": num_shares, + "num_reports": + num_reports, "comments": comments, }) @@ -179,6 +251,13 @@ def _record_trace(self, If only the trace table needs to record time, use the entry time into _record_trace as the time for the trace record. + + Args: + user_id (int): User who performed the action. + action_type (str): Type of action. + action_info (dict): Additional info for the action. + current_time (datetime | int, optional): Time of the action. + If None, sandbox clock provides the current time. """ if self.recsys_type == RecsysType.REDDIT: current_time = self.sandbox_clock.time_transfer( @@ -197,6 +276,13 @@ def _record_trace(self, ) def _check_self_post_rating(self, post_id, user_id): + r""" + Check if a user is allowed to like/dislike their post. + + Args: + post_id (int): Post identifier. + user_id (int): User identifier. + """ self_like_check_query = "SELECT user_id FROM post WHERE post_id = ?" 
self._execute_db_command(self_like_check_query, (post_id, )) result = self.db_cursor.fetchone() @@ -208,6 +294,13 @@ def _check_self_post_rating(self, post_id, user_id): def _check_self_comment_rating(self, comment_id, user_id): + r""" + Check if a user is allowed to like/dislike their comment. + + Args: + comment_id (int): Comment identifier. + user_id (int): User identifier. + """ self_like_check_query = ("SELECT user_id FROM comment WHERE " "comment_id = ?") self._execute_db_command(self_like_check_query, (comment_id, )) @@ -220,6 +313,13 @@ def _check_self_comment_rating(self, comment_id, user_id): def _get_post_type(self, post_id: int): + r""" + Retrieve the type of a post (common, repost, or quote). + + Args: + post_id (int): Post identifier. + """ + query = ( "SELECT original_post_id, quote_content FROM post WHERE post_id " "= ?") diff --git a/oasis/social_platform/process_recsys_posts.py b/oasis/social_platform/process_recsys_posts.py index 6181f94b..dc9e8bbf 100644 --- a/oasis/social_platform/process_recsys_posts.py +++ b/oasis/social_platform/process_recsys_posts.py @@ -23,6 +23,14 @@ @torch.no_grad() def process_batch(model: AutoModel, tokenizer: AutoTokenizer, batch_texts: List[str]): + r"""Process a batch of texts and return pooled embeddings. + + Args: + model (AutoModel): The AutoModel used for encoding. + tokenizer (AutoTokenizer): The AutoTokenizer corresponding + to the model. + batch_texts (List[str]): List of text strings in the batch. + """ device = torch.device("cuda" if torch.cuda.is_available() else "cpu") inputs = tokenizer(batch_texts, return_tensors="pt", @@ -35,6 +43,15 @@ def process_batch(model: AutoModel, tokenizer: AutoTokenizer, def generate_post_vector(model: AutoModel, tokenizer: AutoTokenizer, texts, batch_size): + r"""Generate embeddings for a list of texts using the model. + + Args: + model (AutoModel): The AutoModel used for encoding.
+ tokenizer (AutoTokenizer): The AutoTokenizer corresponding + to the model. + texts (List[str]): List of text strings to embed. + batch_size (int): Number of texts to process per batch. + """ # Loop through all messages # If the list of messages is too large, process them in batches. all_outputs = [] @@ -47,12 +64,12 @@ def generate_post_vector(model: AutoModel, tokenizer: AutoTokenizer, texts, def generate_post_vector_openai(texts: List[str], batch_size: int = 100): - """ - Generate embeddings using OpenAI API + r""" + Generate embeddings with OpenAI API. Args: - texts: List of texts to process - batch_size: Size of each batch + texts (List[str]): List of texts to process. + batch_size (int): Size of each batch. """ openai_embedding = OpenAIEmbedding( model_type=EmbeddingModelType.TEXT_EMBEDDING_3_SMALL) diff --git a/oasis/social_platform/recsys.py b/oasis/social_platform/recsys.py index 9d9429cb..7f371206 100644 --- a/oasis/social_platform/recsys.py +++ b/oasis/social_platform/recsys.py @@ -62,6 +62,12 @@ def get_twhin_tokenizer(): + r"""Lazy initialize the tokenizer for the TWHiN-BERT model + which has a maximum sequence length of 512 tokens. + + Returns: + AutoTokenizer: The tokenizer for TWHiN-BERT. + """ global twhin_tokenizer if twhin_tokenizer is None: from transformers import AutoTokenizer @@ -72,6 +78,14 @@ def get_twhin_tokenizer(): def get_twhin_model(device): + r"""Lazy initialize the TWHiN-BERT model on the given device. + + Args: + device (torch.device): The device where the model should be loaded. + + Returns: + AutoModel: The TWHiN-BERT model loaded on the specified device. + """ global twhin_model if twhin_model is None: from transformers import AutoModel @@ -81,6 +95,19 @@ def get_twhin_model(device): def load_model(model_name): + r"""Load and return the specified model by name. + + Args: + model_name (str): The name of the model to load. + Paraphrase-MiniLM-L6-v2 and twhin-bert-base are supported. 
+ + Returns: + Union[SentenceTransformer, Tuple[AutoTokenizer, AutoModel]]: + - SentenceTransformer instance if `model_name` is + 'paraphrase-MiniLM-L6-v2'. + - (tokenizer, model) tuple if `model_name` is + 'Twitter/twhin-bert-base'. + """ try: device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if model_name == 'paraphrase-MiniLM-L6-v2': @@ -98,6 +125,11 @@ def load_model(model_name): def get_recsys_model(recsys_type: str = None): + r"""Retrieve the recommender system model based on the specified type. + + Args: + recsys_type (str, optional): The type of recommender system. + """ if recsys_type == RecsysType.TWITTER.value: model = load_model('paraphrase-MiniLM-L6-v2') return model @@ -202,6 +234,21 @@ def get_recommendations( score, top_n=100, ): + r""" + Fetch top-K recommendations for a given user based on + cosine similarity scores. + + Args: + user_index (int): Index of the user in the cosine similarity matrix. + cosine_similarities (List[List]): Representing the similarity between + each user and item. + items (dict): Dictionary mapping item indices to item identifiers. + score (List): Weighting scores applied to the similarity values. + top_n (int): Number of top items to return. Default is 100. + Returns: + List: A list of tuples (item_identifier, weighted_similarity_score). + Sorted in descending order of similarity. + """ similarities = np.array(cosine_similarities[user_index]) similarities = similarities * score top_item_indices = similarities.argsort()[::-1][:top_n] @@ -387,6 +434,14 @@ def get_like_post_id(user_id, action, trace_table): # Calculate the average cosine similarity between liked posts and target posts def calculate_like_similarity(liked_vectors, target_vectors): + r""" + Compute cosine similarity between each target vector and all + liked vectors, then average the similarity scores per target. + + Args: + liked_vectors (List[List]): Representing the embeddings of liked posts.
+ target_vectors (List): Representing the embeddings of target posts. + """ # Calculate the norms of the vectors liked_norms = np.linalg.norm(liked_vectors, axis=1) target_norms = np.linalg.norm(target_vectors, axis=1) @@ -428,6 +483,31 @@ def rec_sys_personalized_twh( recall_only: bool = False, enable_like_score: bool = False, use_openai_embedding: bool = False) -> List[List]: + r""" + Generate personalized post recommendations for users on a social platform + based on a hybrid strategy. + + Args: + user_table (List[Dict[str, Any]]): List of user dictionaries + containing user information. + post_table (List[Dict[str, Any]]): List of post dictionaries + containing post information. + latest_post_count (int): Number of most recent posts to update + into the system. + trace_table (List[Dict[str, Any]]): Interaction log. + rec_matrix (List[List]): Existing recommendation matrix. + max_rec_post_len (int): Maximum number of recommended posts per user. + current_time (int): Current simulation or clock time. + recall_only (bool): Whether to return candidate recall results + without ranking. + enable_like_score (bool): Whether to include similarity to previously + liked posts in scoring. + use_openai_embedding (bool): Whether to use OpenAI embeddings + instead of TWHIN-BERT for encoding. + + Returns: + List[List]: Updated recommendations.
+ """ global twhin_model, twhin_tokenizer if twhin_model is None or twhin_tokenizer is None: twhin_tokenizer, twhin_model = get_recsys_model( diff --git a/oasis/social_platform/schema/post.sql b/oasis/social_platform/schema/post.sql index 1a28469f..d668299b 100644 --- a/oasis/social_platform/schema/post.sql +++ b/oasis/social_platform/schema/post.sql @@ -10,6 +10,7 @@ CREATE TABLE post ( num_likes INTEGER DEFAULT 0, num_dislikes INTEGER DEFAULT 0, num_shares INTEGER DEFAULT 0, -- num_shares = num_reposts + num_quotes + num_reports INTEGER DEFAULT 0, FOREIGN KEY(user_id) REFERENCES user(user_id), FOREIGN KEY(original_post_id) REFERENCES post(post_id) ); diff --git a/oasis/social_platform/schema/report.sql b/oasis/social_platform/schema/report.sql new file mode 100644 index 00000000..232c2e0a --- /dev/null +++ b/oasis/social_platform/schema/report.sql @@ -0,0 +1,10 @@ +-- This is the schema definition for the report table +CREATE TABLE report ( + report_id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER, + post_id INTEGER, + report_reason TEXT, + created_at DATETIME, + FOREIGN KEY(user_id) REFERENCES user(user_id), + FOREIGN KEY(post_id) REFERENCES post(post_id) +); diff --git a/oasis/social_platform/typing.py b/oasis/social_platform/typing.py index 55e91a42..ed1c4bed 100644 --- a/oasis/social_platform/typing.py +++ b/oasis/social_platform/typing.py @@ -15,6 +15,9 @@ class ActionType(Enum): + r""" + Enumeration of possible user actions in the social platform. + """ EXIT = "exit" REFRESH = "refresh" SEARCH_USER = "search_user" @@ -24,6 +27,7 @@ class ActionType(Enum): UNLIKE_POST = "unlike_post" DISLIKE_POST = "dislike_post" UNDO_DISLIKE_POST = "undo_dislike_post" + REPORT_POST = "report_post" FOLLOW = "follow" UNFOLLOW = "unfollow" MUTE = "mute" @@ -49,6 +53,9 @@ class ActionType(Enum): @classmethod def get_default_twitter_actions(cls): + r""" + Retrieve set of actions available on a Twitter-like platform. 
+ """ return [ cls.CREATE_POST, cls.LIKE_POST, @@ -60,6 +67,9 @@ def get_default_twitter_actions(cls): @classmethod def get_default_reddit_actions(cls): + r""" + Retrieve set of actions available on a Reddit-like platform. + """ return [ cls.LIKE_POST, cls.DISLIKE_POST, @@ -78,6 +88,9 @@ def get_default_reddit_actions(cls): class RecsysType(Enum): + r""" + Types of recommendation systems. + """ TWITTER = "twitter" TWHIN = "twhin-bert" REDDIT = "reddit" @@ -85,5 +98,8 @@ class RecsysType(Enum): class DefaultPlatformType(Enum): + r""" + List of possible social platform. + """ TWITTER = "twitter" REDDIT = "reddit" diff --git a/poetry.lock b/poetry.lock index 1c6d2707..986cac62 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. [[package]] name = "aiofiles" @@ -6,6 +6,7 @@ version = "24.1.0" description = "File support for asyncio." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"}, {file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"}, @@ -17,6 +18,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -28,6 +30,7 @@ version = "4.9.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"}, {file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"}, @@ -41,7 +44,7 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] -test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] +test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] trio = ["trio 
(>=0.26.1)"] [[package]] @@ -50,18 +53,19 @@ version = "25.3.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, ] [package.extras] -benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] docs 
= ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] -tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] [[package]] name = "backoff" @@ -69,6 +73,7 @@ version = "2.2.1" description = "Function decoration for backoff and retry" optional = false python-versions = ">=3.7,<4.0" +groups = ["main"] files = [ {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, @@ -80,6 +85,7 @@ version = "4.13.4" description = "Screen-scraping library" optional = false python-versions = ">=3.7.0" +groups = ["main"] files = [ {file = "beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b"}, {file = "beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195"}, @@ -102,6 +108,7 @@ version = "1.7.1" description = "cffi-based cairo bindings for Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "cairocffi-1.7.1-py3-none-any.whl", hash = "sha256:9803a0e11f6c962f3b0ae2ec8ba6ae45e957a146a004697a1ac1bbf16b073b3f"}, 
{file = "cairocffi-1.7.1.tar.gz", hash = "sha256:2e48ee864884ec4a3a34bfa8c9ab9999f688286eb714a15a43ec9d068c36557b"}, @@ -117,13 +124,14 @@ xcb = ["xcffib (>=1.4.0)"] [[package]] name = "camel-ai" -version = "0.2.62" +version = "0.2.70" description = "Communicative Agents for AI Society Study" optional = false python-versions = "<3.13,>=3.10" +groups = ["main"] files = [ - {file = "camel_ai-0.2.62-py3-none-any.whl", hash = "sha256:8a6d0a2f31d285158e58499ec8d87d20444644cd70af0d12e2099659a9c3944e"}, - {file = "camel_ai-0.2.62.tar.gz", hash = "sha256:40b8a6a05da079ad6d08ed032661af535803ef92783bb5cba17e0780eda1a2e0"}, + {file = "camel_ai-0.2.70-py3-none-any.whl", hash = "sha256:3e551e80eacf98d343b279eb02a4402ac37a6ec385b6b1e0210e3b4f2b01695c"}, + {file = "camel_ai-0.2.70.tar.gz", hash = "sha256:8f56bf7e180e641568d217665d7bde07fdf9a83ede024739fafe0ca6400cd883"}, ] [package.dependencies] @@ -139,32 +147,32 @@ pydantic = ">=2.10.6" tiktoken = ">=0.7.0,<0.8" [package.extras] -all = ["accelerate (>=0.26.0,<0.27)", "aci-sdk (>=1.0.0b1)", "agentops (>=0.3.21,<0.4)", "aiosqlite (>=0.20.0,<0.21)", "anthropic (>=0.47.0,<0.50.0)", "apify-client (>=1.8.1,<2)", "arxiv (>=2.1.3,<3)", "arxiv2text (>=0.1.14,<0.2)", "azure-storage-blob (>=12.21.0,<13)", "beautifulsoup4 (>=4,<5)", "botocore (>=1.35.3,<2)", "chunkr-ai (>=0.0.50)", "cohere (>=5.11.0,<6)", "crawl4ai (>=0.3.745)", "dappier (>=0.3.3,<0.4)", "datacommons (>=1.4.3,<2)", "datacommons-pandas (>=0.0.3,<0.0.4)", "datasets (>=3,<4)", "daytona-sdk (==0.14.0)", "diffusers (>=0.25.0,<0.26)", "discord-py (>=2.3.2,<3)", "docker (>=7.1.0,<8)", "docx (>=0.2.4)", "docx2txt (>=0.8,<0.9)", "duckduckgo-search (>=6.3.5,<7)", "e2b-code-interpreter (>=1.0.3,<2)", "exa-py (>=1.10.0,<2)", "faiss-cpu (>=1.7.2,<2)", "fastapi (>=0.115.11)", "ffmpeg-python (>=0.2.0,<0.3)", "firecrawl-py (>=1.0.0,<2)", "fish-audio-sdk (>=2024.12.5,<2025)", "fpdf (>=1.7.2)", "google-api-python-client (==2.166.0)", "google-auth-httplib2 (==0.2.0)", "google-auth-oauthlib 
(==1.2.1)", "google-cloud-storage (>=2.18.0,<3)", "google-genai (>=1.13.0)", "googlemaps (>=4.10.0,<5)", "gradio (>=3,<4)", "html2text (>=2024.2.26)", "ibm-watsonx-ai (>=1.3.11)", "imageio[pyav] (>=2.34.2,<3)", "ipykernel (>=6.0.0,<7)", "jupyter-client (>=8.6.2,<9)", "linkup-sdk (>=0.2.1,<0.3)", "litellm (>=1.38.1,<2)", "markitdown (==0.1.1)", "math-verify (>=0.7.0,<0.8)", "mcp (>=1.3.0)", "mem0ai (>=0.1.67)", "mistralai (>=1.1.0,<2)", "mock (>=5,<6)", "mypy (>=1.5.1,<2)", "nebula3-python (==3.8.2)", "neo4j (>=5.18.0,<6)", "networkx (>=3.4.2,<4)", "newspaper3k (>=0.2.8,<0.3)", "notion-client (>=2.2.1,<3)", "numpy (>=1.2,<=2.2)", "openapi-spec-validator (>=0.7.1,<0.8)", "opencv-python (>=4,<5)", "openpyxl (>=3.1.5)", "outlines (>=0.1.7,<0.2)", "pandas (>=1.5.3,<2)", "pandasai (>=2.3.0,<3)", "playwright (>=1.50.0)", "prance (>=23.6.21.0,<24)", "praw (>=7.7.1,<8)", "pre-commit (>=3,<4)", "pyautogui (>=0.9.54,<0.10)", "pydub (>=0.25.1,<0.26)", "pygithub (>=2.6.0,<3)", "pylatex (>=1.4.2)", "pymilvus (>=2.4.0,<3)", "pymupdf (>=1.22.5,<2)", "pyobvector (>=0.1.18)", "pyowm (>=3.3.0,<4)", "pytelegrambotapi (>=4.18.0,<5)", "pytest (>=7,<8)", "pytest-asyncio (>=0.23.0,<0.24)", "pytest-cov (>=4,<5)", "python-pptx (>=1.0.2)", "pytidb-experimental (==0.0.1.dev4)", "qdrant-client (>=1.9.0,<2)", "rank-bm25 (>=0.2.2,<0.3)", "redis (>=5.0.6,<6)", "reka-api (>=3.0.8,<4)", "requests-oauthlib (>=1.3.1,<2)", "rouge (>=1.0.1,<2)", "scenedetect (>=0.6.5.2)", "scholarly[tor] (==1.7.11)", "scrapegraph-py (>=1.12.0,<2)", "sentence-transformers (>=3.0.1,<4)", "sentencepiece (>=0.2,<0.3)", "slack-bolt (>=1.20.1,<2)", "slack-sdk (>=3.27.2,<4)", "soundfile (>=0.13,<0.14)", "stripe (>=11.3.0,<12)", "sympy (>=1.13.3,<2)", "tabulate (>=0.9.0)", "tavily-python (>=0.5.0,<0.6)", "textblob (>=0.17.1,<0.18)", "torch", "transformers (>=4,<5)", "tree-sitter (>=0.23.2,<0.24)", "tree-sitter-python (>=0.23.6,<0.24)", "typer (>=0.15.2)", "types-colorama (>=0.4.15,<0.5)", "types-mock (>=5.1.0,<6)", 
"types-pyyaml (>=6.0.12,<7)", "types-requests (>=2.31.0,<3)", "types-setuptools (>=69.2.0,<70)", "types-tqdm (>=4.66.0,<5)", "unstructured (==0.16.20)", "wikipedia (>=1,<2)", "wolframalpha (>=5.0.0,<6)", "xls2xlsx (>=0.2.0)", "yt-dlp (>=2024.11.4,<2025)"] +all = ["aci-sdk (>=1.0.0b1)", "agentops (>=0.3.21,<0.4)", "aiosqlite (>=0.20.0,<0.21)", "anthropic (>=0.47.0,<0.50.0)", "apify-client (>=1.8.1,<2)", "arxiv (>=2.1.3,<3)", "arxiv2text (>=0.1.14,<0.2)", "azure-storage-blob (>=12.21.0,<13)", "beautifulsoup4 (>=4,<5)", "botocore (>=1.35.3,<2)", "chromadb (>=0.6.0,<1.0.0)", "chunkr-ai (>=0.0.50)", "cohere (>=5.11.0,<6)", "crawl4ai (>=0.3.745)", "dappier (>=0.3.3,<0.4)", "datacommons (>=1.4.3,<2)", "datacommons-pandas (>=0.0.3,<0.0.4)", "datasets (>=3,<4)", "daytona-sdk (>=0.20.0)", "diffusers (>=0.25.0,<0.26)", "discord-py (>=2.3.2,<3)", "docker (>=7.1.0,<8)", "docx (>=0.2.4)", "docx2txt (>=0.8,<0.9)", "duckduckgo-search (>=6.3.5,<7)", "e2b-code-interpreter (>=1.0.3,<2)", "exa-py (>=1.10.0,<2)", "faiss-cpu (>=1.7.2,<2)", "fastapi (>=0.115.11)", "ffmpeg-python (>=0.2.0,<0.3)", "firecrawl-py (>=1.0.0,<2)", "fish-audio-sdk (>=2024.12.5,<2025)", "flask (>=2.0)", "fpdf (>=1.7.2)", "google-api-python-client (==2.166.0)", "google-auth-httplib2 (==0.2.0)", "google-auth-oauthlib (==1.2.1)", "google-cloud-storage (>=2.18.0,<3)", "google-genai (>=1.13.0)", "googlemaps (>=4.10.0,<5)", "gradio (>=3,<4)", "html2text (>=2024.2.26)", "ibm-watsonx-ai (>=1.3.11)", "imageio[pyav] (>=2.34.2,<3)", "ipykernel (>=6.0.0,<7)", "jupyter-client (>=8.6.2,<9)", "langfuse (>=2.60.5)", "linkup-sdk (>=0.2.1,<0.3)", "litellm (>=1.38.1,<2)", "markitdown (==0.1.1)", "math-verify (>=0.7.0,<0.8)", "mcp (>=1.3.0)", "mem0ai (>=0.1.67)", "mistralai (>=1.1.0,<2)", "mock (>=5,<6)", "mypy (>=1.5.1,<2)", "nebula3-python (==3.8.2)", "neo4j (>=5.18.0,<6)", "networkx (>=3.4.2,<4)", "newspaper3k (>=0.2.8,<0.3)", "notion-client (>=2.2.1,<3)", "numpy (>=1.2,<=2.2)", "openapi-spec-validator (>=0.7.1,<0.8)", "openpyxl 
(>=3.1.5)", "pandas (>=1.5.3,<2)", "pandasai (>=2.3.0,<3)", "pgvector (>=0.2.4,<0.3)", "playwright (>=1.50.0)", "prance (>=23.6.21.0,<24)", "praw (>=7.7.1,<8)", "pre-commit (>=3,<4)", "psycopg[binary] (>=3.1.18,<4)", "pyautogui (>=0.9.54,<0.10)", "pydub (>=0.25.1,<0.26)", "pygithub (>=2.6.0,<3)", "pylatex (>=1.4.2)", "pymilvus (>=2.4.0,<3)", "pymupdf (>=1.22.5,<2)", "pyobvector (>=0.1.18)", "pyowm (>=3.3.0,<4)", "pytelegrambotapi (>=4.18.0,<5)", "pytesseract (>=0.3.13)", "pytest (>=7,<8)", "pytest-asyncio (>=0.23.0,<0.24)", "pytest-cov (>=4,<5)", "python-pptx (>=1.0.2)", "pytidb-experimental (==0.0.1.dev4)", "qdrant-client (>=1.9.0,<2)", "rank-bm25 (>=0.2.2,<0.3)", "redis (>=5.0.6,<6)", "reka-api (>=3.0.8,<4)", "requests-oauthlib (>=1.3.1,<2)", "rlcard (>=1.0.0,<1.3.0)", "rouge (>=1.0.1,<2)", "scenedetect (>=0.6.5.2)", "scholarly[tor] (==1.7.11)", "scrapegraph-py (>=1.12.0,<2)", "sentencepiece (>=0.2,<0.3)", "slack-bolt (>=1.20.1,<2)", "slack-sdk (>=3.27.2,<4)", "soundfile (>=0.13,<0.14)", "stripe (>=11.3.0,<12)", "sympy (>=1.13.3,<2)", "tabulate (>=0.9.0)", "tavily-python (>=0.5.0,<0.6)", "textblob (>=0.17.1,<0.18)", "transformers (>=4,<5)", "tree-sitter (>=0.23.2,<0.24)", "tree-sitter-python (>=0.23.6,<0.24)", "typer (>=0.15.2)", "types-colorama (>=0.4.15,<0.5)", "types-mock (>=5.1.0,<6)", "types-pyyaml (>=6.0.12,<7)", "types-requests (>=2.31.0,<3)", "types-setuptools (>=69.2.0,<70)", "types-tqdm (>=4.66.0,<5)", "unstructured (==0.16.20)", "weaviate-client (>=4.15.0)", "wikipedia (>=1,<2)", "wolframalpha (>=5.0.0,<6)", "xls2xlsx (>=0.2.0)", "yt-dlp (>=2024.11.4,<2025)"] communication-tools = ["discord-py (>=2.3.2,<3)", "notion-client (>=2.2.1,<3)", "praw (>=7.7.1,<8)", "pygithub (>=2.6.0,<3)", "pytelegrambotapi (>=4.18.0,<5)", "slack-bolt (>=1.20.1,<2)", "slack-sdk (>=3.27.2,<4)"] data-tools = ["aiosqlite (>=0.20.0,<0.21)", "datacommons (>=1.4.3,<2)", "datacommons-pandas (>=0.0.3,<0.0.4)", "math-verify (>=0.7.0,<0.8)", "networkx (>=3.4.2,<4)", "numpy 
(>=1.2,<=2.2)", "pandas (>=1.5.3,<2)", "rouge (>=1.0.1,<2)", "stripe (>=11.3.0,<12)", "textblob (>=0.17.1,<0.18)"] -dev = ["gradio (>=3,<4)", "mock (>=5,<6)", "mypy (>=1.5.1,<2)", "pre-commit (>=3,<4)", "pytest (>=7,<8)", "pytest-asyncio (>=0.23.0,<0.24)", "pytest-cov (>=4,<5)", "ruff (>=0.7,<0.8)", "toml (>=0.10.2)", "types-colorama (>=0.4.15,<0.5)", "types-mock (>=5.1.0,<6)", "types-pyyaml (>=6.0.12,<7)", "types-requests (>=2.31.0,<3)", "types-setuptools (>=69.2.0,<70)", "types-tqdm (>=4.66.0,<5)", "uv (==0.6.5)"] -dev-tools = ["aci-sdk (>=1.0.0b1)", "agentops (>=0.3.21,<0.4)", "daytona-sdk (==0.14.0)", "docker (>=7.1.0,<8)", "e2b-code-interpreter (>=1.0.3,<2)", "ipykernel (>=6.0.0,<7)", "jupyter-client (>=8.6.2,<9)", "mcp (>=1.3.0)", "tree-sitter (>=0.23.2,<0.24)", "tree-sitter-python (>=0.23.6,<0.24)", "typer (>=0.15.2)"] +dev = ["flask (>=2.0)", "gradio (>=3,<4)", "mock (>=5,<6)", "mypy (>=1.5.1,<2)", "pre-commit (>=3,<4)", "pytest (>=7,<8)", "pytest-asyncio (>=0.23.0,<0.24)", "pytest-cov (>=4,<5)", "ruff (>=0.7,<0.8)", "toml (>=0.10.2)", "types-colorama (>=0.4.15,<0.5)", "types-mock (>=5.1.0,<6)", "types-pyyaml (>=6.0.12,<7)", "types-requests (>=2.31.0,<3)", "types-setuptools (>=69.2.0,<70)", "types-tqdm (>=4.66.0,<5)", "uv (>=0.7.0,<0.8)"] +dev-tools = ["aci-sdk (>=1.0.0b1)", "agentops (>=0.3.21,<0.4)", "daytona-sdk (>=0.20.0)", "docker (>=7.1.0,<8)", "e2b-code-interpreter (>=1.0.3,<2)", "ipykernel (>=6.0.0,<7)", "jupyter-client (>=8.6.2,<9)", "langfuse (>=2.60.5)", "mcp (>=1.3.0)", "tree-sitter (>=0.23.2,<0.24)", "tree-sitter-python (>=0.23.6,<0.24)", "typer (>=0.15.2)"] docs = ["docutils (<0.20.0)", "myst-parser", "nbsphinx", "sphinx (>=7,<8)", "sphinx-book-theme", "sphinxext-rediraffe (>=0.2.7,<0.3)"] document-tools = ["beautifulsoup4 (>=4,<5)", "chunkr-ai (>=0.0.50)", "crawl4ai (>=0.3.745)", "docx (>=0.2.4)", "docx2txt (>=0.8,<0.9)", "fpdf (>=1.7.2)", "markitdown (==0.1.1)", "numpy (>=1.2,<=2.2)", "openapi-spec-validator (>=0.7.1,<0.8)", "openpyxl 
(>=3.1.5)", "pandasai (>=2.3.0,<3)", "prance (>=23.6.21.0,<24)", "pylatex (>=1.4.2)", "pymupdf (>=1.22.5,<2)", "python-pptx (>=1.0.2)", "tabulate (>=0.9.0)", "unstructured (==0.16.20)", "xls2xlsx (>=0.2.0)"] -huggingface = ["accelerate (>=0.26.0,<0.27)", "datasets (>=3,<4)", "diffusers (>=0.25.0,<0.26)", "opencv-python (>=4,<5)", "sentencepiece (>=0.2,<0.3)", "soundfile (>=0.13,<0.14)", "torch", "transformers (>=4,<5)"] -media-tools = ["ffmpeg-python (>=0.2.0,<0.3)", "imageio[pyav] (>=2.34.2,<3)", "pydub (>=0.25.1,<0.26)", "scenedetect (>=0.6.5.2)", "yt-dlp (>=2024.11.4,<2025)"] +huggingface = ["datasets (>=3,<4)", "diffusers (>=0.25.0,<0.26)", "huggingface-hub", "sentencepiece (>=0.2,<0.3)", "soundfile (>=0.13,<0.14)", "transformers (>=4,<5)"] +media-tools = ["ffmpeg-python (>=0.2.0,<0.3)", "imageio[pyav] (>=2.34.2,<3)", "pydub (>=0.25.1,<0.26)", "pytesseract (>=0.3.13)", "scenedetect (>=0.6.5.2)", "yt-dlp (>=2024.11.4,<2025)"] model-platforms = ["anthropic (>=0.47.0,<0.50.0)", "cohere (>=5.11.0,<6)", "fish-audio-sdk (>=2024.12.5,<2025)", "ibm-watsonx-ai (>=1.3.11)", "litellm (>=1.38.1,<2)", "mistralai (>=1.1.0,<2)", "reka-api (>=3.0.8,<4)"] -owl = ["aci-sdk (>=1.0.0b1)", "anthropic (>=0.47.0,<0.50.0)", "beautifulsoup4 (>=4,<5)", "chunkr-ai (>=0.0.41)", "chunkr-ai (>=0.0.50)", "crawl4ai (>=0.3.745)", "datasets (>=3,<4)", "docx (>=0.2.4)", "docx2txt (>=0.8,<0.9)", "duckduckgo-search (>=6.3.5,<7)", "e2b-code-interpreter (>=1.0.3,<2)", "ffmpeg-python (>=0.2.0,<0.3)", "fpdf (>=1.7.2)", "html2text (>=2024.2.26)", "imageio[pyav] (>=2.34.2,<3)", "mcp-server-fetch (==2025.1.17)", "mcp-simple-arxiv (==0.2.2)", "newspaper3k (>=0.2.8,<0.3)", "numpy (>=1.2,<=2.2)", "openapi-spec-validator (>=0.7.1,<0.8)", "opencv-python (>=4,<5)", "openpyxl (>=3.1.5)", "outlines (>=0.1.7,<0.2)", "pandas (>=1.5.3,<2)", "pandasai (>=2.3.0,<3)", "playwright (>=1.50.0)", "prance (>=23.6.21.0,<24)", "pyautogui (>=0.9.54,<0.10)", "pydub (>=0.25.1,<0.26)", "pylatex (>=1.4.2)", "pymupdf 
(>=1.22.5,<2)", "python-dotenv (>=1.0.0,<2)", "python-pptx (>=1.0.2)", "requests-oauthlib (>=1.3.1,<2)", "rouge (>=1.0.1,<2)", "scenedetect (>=0.6.5.2)", "scrapegraph-py (>=1.12.0,<2)", "sentencepiece (>=0.2,<0.3)", "soundfile (>=0.13,<0.14)", "tabulate (>=0.9.0)", "transformers (>=4,<5)", "tree-sitter (>=0.23.2,<0.24)", "tree-sitter-python (>=0.23.6,<0.24)", "typer (>=0.15.2)", "unstructured (==0.16.20)", "wikipedia (>=1,<2)", "xls2xlsx (>=0.2.0)", "yt-dlp (>=2024.11.4,<2025)"] -rag = ["chunkr-ai (>=0.0.50)", "cohere (>=5.11.0,<6)", "crawl4ai (>=0.3.745)", "faiss-cpu (>=1.7.2,<2)", "google-genai (>=1.13.0)", "nebula3-python (==3.8.2)", "neo4j (>=5.18.0,<6)", "numpy (>=1.2,<=2.2)", "pandasai (>=2.3.0,<3)", "pymilvus (>=2.4.0,<3)", "pyobvector (>=0.1.18)", "pytidb-experimental (==0.0.1.dev4)", "qdrant-client (>=1.9.0,<2)", "rank-bm25 (>=0.2.2,<0.3)", "sentence-transformers (>=3.0.1,<4)", "unstructured (==0.16.20)"] +owl = ["aci-sdk (>=1.0.0b1)", "anthropic (>=0.47.0,<0.50.0)", "beautifulsoup4 (>=4,<5)", "chunkr-ai (>=0.0.41)", "chunkr-ai (>=0.0.50)", "crawl4ai (>=0.3.745)", "datasets (>=3,<4)", "docx (>=0.2.4)", "docx2txt (>=0.8,<0.9)", "duckduckgo-search (>=6.3.5,<7)", "e2b-code-interpreter (>=1.0.3,<2)", "ffmpeg-python (>=0.2.0,<0.3)", "fpdf (>=1.7.2)", "html2text (>=2024.2.26)", "imageio[pyav] (>=2.34.2,<3)", "markitdown (==0.1.1)", "mcp-server-fetch (==2025.1.17)", "mcp-simple-arxiv (==0.2.2)", "newspaper3k (>=0.2.8,<0.3)", "numpy (>=1.2,<=2.2)", "openapi-spec-validator (>=0.7.1,<0.8)", "openpyxl (>=3.1.5)", "pandas (>=1.5.3,<2)", "pandasai (>=2.3.0,<3)", "playwright (>=1.50.0)", "prance (>=23.6.21.0,<24)", "pyautogui (>=0.9.54,<0.10)", "pydub (>=0.25.1,<0.26)", "pylatex (>=1.4.2)", "pymupdf (>=1.22.5,<2)", "pytesseract (>=0.3.13)", "python-dotenv (>=1.0.0,<2)", "python-pptx (>=1.0.2)", "requests-oauthlib (>=1.3.1,<2)", "rouge (>=1.0.1,<2)", "scenedetect (>=0.6.5.2)", "scrapegraph-py (>=1.12.0,<2)", "sentencepiece (>=0.2,<0.3)", "soundfile (>=0.13,<0.14)", 
"tabulate (>=0.9.0)", "transformers (>=4,<5)", "tree-sitter (>=0.23.2,<0.24)", "tree-sitter-python (>=0.23.6,<0.24)", "typer (>=0.15.2)", "unstructured (==0.16.20)", "wikipedia (>=1,<2)", "xls2xlsx (>=0.2.0)", "yt-dlp (>=2024.11.4,<2025)"] +rag = ["chromadb (>=0.6.0,<1.0.0)", "chunkr-ai (>=0.0.50)", "cohere (>=5.11.0,<6)", "crawl4ai (>=0.3.745)", "faiss-cpu (>=1.7.2,<2)", "google-genai (>=1.13.0)", "nebula3-python (==3.8.2)", "neo4j (>=5.18.0,<6)", "numpy (>=1.2,<=2.2)", "pandasai (>=2.3.0,<3)", "pymilvus (>=2.4.0,<3)", "pyobvector (>=0.1.18)", "pytidb-experimental (==0.0.1.dev4)", "qdrant-client (>=1.9.0,<2)", "rank-bm25 (>=0.2.2,<0.3)", "unstructured (==0.16.20)", "weaviate-client (>=4.15.0)"] research-tools = ["arxiv (>=2.1.3,<3)", "arxiv2text (>=0.1.14,<0.2)", "scholarly[tor] (==1.7.11)"] -storage = ["azure-storage-blob (>=12.21.0,<13)", "botocore (>=1.35.3,<2)", "faiss-cpu (>=1.7.2,<2)", "google-cloud-storage (>=2.18.0,<3)", "mem0ai (>=0.1.73)", "nebula3-python (==3.8.2)", "neo4j (>=5.18.0,<6)", "pymilvus (>=2.4.0,<3)", "pyobvector (>=0.1.18)", "pytidb-experimental (==0.0.1.dev4)", "qdrant-client (>=1.9.0,<2)", "redis (>=5.0.6,<6)"] -test = ["mock (>=5,<6)", "pytest (>=7,<8)", "pytest-asyncio (>=0.23.0,<0.24)"] +storage = ["azure-storage-blob (>=12.21.0,<13)", "botocore (>=1.35.3,<2)", "chromadb (>=0.6.0,<1.0.0)", "faiss-cpu (>=1.7.2,<2)", "google-cloud-storage (>=2.18.0,<3)", "mem0ai (>=0.1.73)", "nebula3-python (==3.8.2)", "neo4j (>=5.18.0,<6)", "pgvector (>=0.2.4,<0.3)", "psycopg[binary] (>=3.1.18,<4)", "pymilvus (>=2.4.0,<3)", "pyobvector (>=0.1.18)", "pytidb-experimental (==0.0.1.dev4)", "qdrant-client (>=1.9.0,<2)", "redis (>=5.0.6,<6)", "weaviate-client (>=4.15.0)"] web-tools = ["apify-client (>=1.8.1,<2)", "beautifulsoup4 (>=4,<5)", "dappier (>=0.3.3,<0.4)", "duckduckgo-search (>=6.3.5,<7)", "exa-py (>=1.10.0,<2)", "fastapi (>=0.115.11)", "firecrawl-py (>=1.0.0,<2)", "google-api-python-client (==2.166.0)", "google-auth-httplib2 (==0.2.0)", 
"google-auth-oauthlib (==1.2.1)", "googlemaps (>=4.10.0,<5)", "html2text (>=2024.2.26)", "linkup-sdk (>=0.2.1,<0.3)", "newspaper3k (>=0.2.8,<0.3)", "playwright (>=1.50.0)", "pyowm (>=3.3.0,<4)", "requests-oauthlib (>=1.3.1,<2)", "scrapegraph-py (>=1.12.0,<2)", "sympy (>=1.13.3,<2)", "tavily-python (>=0.5.0,<0.6)", "wikipedia (>=1,<2)", "wolframalpha (>=5.0.0,<6)"] [[package]] name = "certifi" -version = "2025.4.26" +version = "2025.6.15" description = "Python package for providing Mozilla's CA Bundle." optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +groups = ["main"] files = [ - {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"}, - {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"}, + {file = "certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057"}, + {file = "certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b"}, ] [[package]] @@ -173,6 +181,7 @@ version = "1.17.1" description = "Foreign Function Interface for Python calling C code." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, @@ -252,6 +261,7 @@ version = "3.4.0" description = "Validate configuration and produce human readable error messages." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, @@ -263,6 +273,7 @@ version = "5.2.0" description = "Universal encoding detector for Python 3" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, @@ -274,6 +285,7 @@ version = "3.4.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, @@ -375,6 +387,7 @@ version = "8.2.1" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.10" +groups = ["main"] files = [ {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, @@ -389,6 +402,7 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -396,61 +410,62 @@ files = [ [[package]] name = "cryptography" -version = "45.0.3" +version = "45.0.4" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = "!=3.9.0,!=3.9.1,>=3.7" -files = [ - {file = "cryptography-45.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:7573d9eebaeceeb55285205dbbb8753ac1e962af3d9640791d12b36864065e71"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d377dde61c5d67eb4311eace661c3efda46c62113ff56bf05e2d679e02aebb5b"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fae1e637f527750811588e4582988932c222f8251f7b7ea93739acb624e1487f"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ca932e11218bcc9ef812aa497cdf669484870ecbcf2d99b765d6c27a86000942"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af3f92b1dc25621f5fad065288a44ac790c5798e986a34d393ab27d2b27fcff9"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2f8f8f0b73b885ddd7f3d8c2b2234a7d3ba49002b0223f58cfde1bedd9563c56"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9cc80ce69032ffa528b5e16d217fa4d8d4bb7d6ba8659c1b4d74a1b0f4235fca"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:c824c9281cb628015bfc3c59335163d4ca0540d49de4582d6c2637312907e4b1"}, - {file = 
"cryptography-45.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5833bb4355cb377ebd880457663a972cd044e7f49585aee39245c0d592904578"}, - {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:9bb5bf55dcb69f7067d80354d0a348368da907345a2c448b0babc4215ccd3497"}, - {file = "cryptography-45.0.3-cp311-abi3-win32.whl", hash = "sha256:3ad69eeb92a9de9421e1f6685e85a10fbcfb75c833b42cc9bc2ba9fb00da4710"}, - {file = "cryptography-45.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:97787952246a77d77934d41b62fb1b6f3581d83f71b44796a4158d93b8f5c490"}, - {file = "cryptography-45.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:c92519d242703b675ccefd0f0562eb45e74d438e001f8ab52d628e885751fb06"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5edcb90da1843df85292ef3a313513766a78fbbb83f584a5a58fb001a5a9d57"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38deed72285c7ed699864f964a3f4cf11ab3fb38e8d39cfcd96710cd2b5bb716"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5555365a50efe1f486eed6ac7062c33b97ccef409f5970a0b6f205a7cfab59c8"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9e4253ed8f5948a3589b3caee7ad9a5bf218ffd16869c516535325fece163dcc"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cfd84777b4b6684955ce86156cfb5e08d75e80dc2585e10d69e47f014f0a5342"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:a2b56de3417fd5f48773ad8e91abaa700b678dc7fe1e0c757e1ae340779acf7b"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:57a6500d459e8035e813bd8b51b671977fb149a8c95ed814989da682314d0782"}, - {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = 
"sha256:f22af3c78abfbc7cbcdf2c55d23c3e022e1a462ee2481011d518c7fb9c9f3d65"}, - {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:232954730c362638544758a8160c4ee1b832dc011d2c41a306ad8f7cccc5bb0b"}, - {file = "cryptography-45.0.3-cp37-abi3-win32.whl", hash = "sha256:cb6ab89421bc90e0422aca911c69044c2912fc3debb19bb3c1bfe28ee3dff6ab"}, - {file = "cryptography-45.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:d54ae41e6bd70ea23707843021c778f151ca258081586f0cfa31d936ae43d1b2"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ed43d396f42028c1f47b5fec012e9e12631266e3825e95c00e3cf94d472dac49"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:fed5aaca1750e46db870874c9c273cd5182a9e9deb16f06f7bdffdb5c2bde4b9"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:00094838ecc7c6594171e8c8a9166124c1197b074cfca23645cee573910d76bc"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:92d5f428c1a0439b2040435a1d6bc1b26ebf0af88b093c3628913dd464d13fa1"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:ec64ee375b5aaa354b2b273c921144a660a511f9df8785e6d1c942967106438e"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:71320fbefd05454ef2d457c481ba9a5b0e540f3753354fff6f780927c25d19b0"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:edd6d51869beb7f0d472e902ef231a9b7689508e83880ea16ca3311a00bf5ce7"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:555e5e2d3a53b4fabeca32835878b2818b3f23966a4efb0d566689777c5a12c8"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:25286aacb947286620a31f78f2ed1a32cded7be5d8b729ba3fb2c988457639e4"}, - {file = 
"cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:050ce5209d5072472971e6efbfc8ec5a8f9a841de5a4db0ebd9c2e392cb81972"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:dc10ec1e9f21f33420cc05214989544727e776286c1c16697178978327b95c9c"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:9eda14f049d7f09c2e8fb411dda17dd6b16a3c76a1de5e249188a32aeb92de19"}, - {file = "cryptography-45.0.3.tar.gz", hash = "sha256:ec21313dd335c51d7877baf2972569f40a4291b76a0ce51391523ae358d05899"}, +groups = ["main"] +files = [ + {file = "cryptography-45.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:425a9a6ac2823ee6e46a76a21a4e8342d8fa5c01e08b823c1f19a8b74f096069"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:680806cf63baa0039b920f4976f5f31b10e772de42f16310a6839d9f21a26b0d"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4ca0f52170e821bc8da6fc0cc565b7bb8ff8d90d36b5e9fdd68e8a86bdf72036"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f3fe7a5ae34d5a414957cc7f457e2b92076e72938423ac64d215722f6cf49a9e"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:25eb4d4d3e54595dc8adebc6bbd5623588991d86591a78c2548ffb64797341e2"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce1678a2ccbe696cf3af15a75bb72ee008d7ff183c9228592ede9db467e64f1b"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:49fe9155ab32721b9122975e168a6760d8ce4cffe423bcd7ca269ba41b5dfac1"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:2882338b2a6e0bd337052e8b9007ced85c637da19ef9ecaf437744495c8c2999"}, + {file = "cryptography-45.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = 
"sha256:23b9c3ea30c3ed4db59e7b9619272e94891f8a3a5591d0b656a7582631ccf750"}, + {file = "cryptography-45.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b0a97c927497e3bc36b33987abb99bf17a9a175a19af38a892dc4bbb844d7ee2"}, + {file = "cryptography-45.0.4-cp311-abi3-win32.whl", hash = "sha256:e00a6c10a5c53979d6242f123c0a97cff9f3abed7f064fc412c36dc521b5f257"}, + {file = "cryptography-45.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:817ee05c6c9f7a69a16200f0c90ab26d23a87701e2a284bd15156783e46dbcc8"}, + {file = "cryptography-45.0.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:964bcc28d867e0f5491a564b7debb3ffdd8717928d315d12e0d7defa9e43b723"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6a5bf57554e80f75a7db3d4b1dacaa2764611ae166ab42ea9a72bcdb5d577637"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:46cf7088bf91bdc9b26f9c55636492c1cce3e7aaf8041bbf0243f5e5325cfb2d"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7bedbe4cc930fa4b100fc845ea1ea5788fcd7ae9562e669989c11618ae8d76ee"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:eaa3e28ea2235b33220b949c5a0d6cf79baa80eab2eb5607ca8ab7525331b9ff"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:7ef2dde4fa9408475038fc9aadfc1fb2676b174e68356359632e980c661ec8f6"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:6a3511ae33f09094185d111160fd192c67aa0a2a8d19b54d36e4c78f651dc5ad"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:06509dc70dd71fa56eaa138336244e2fbaf2ac164fc9b5e66828fccfd2b680d6"}, + {file = "cryptography-45.0.4-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5f31e6b0a5a253f6aa49be67279be4a7e5a4ef259a9f33c69f7d1b1191939872"}, + {file = 
"cryptography-45.0.4-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:944e9ccf67a9594137f942d5b52c8d238b1b4e46c7a0c2891b7ae6e01e7c80a4"}, + {file = "cryptography-45.0.4-cp37-abi3-win32.whl", hash = "sha256:c22fe01e53dc65edd1945a2e6f0015e887f84ced233acecb64b4daadb32f5c97"}, + {file = "cryptography-45.0.4-cp37-abi3-win_amd64.whl", hash = "sha256:627ba1bc94f6adf0b0a2e35d87020285ead22d9f648c7e75bb64f367375f3b22"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a77c6fb8d76e9c9f99f2f3437c1a4ac287b34eaf40997cfab1e9bd2be175ac39"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7aad98a25ed8ac917fdd8a9c1e706e5a0956e06c498be1f713b61734333a4507"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3530382a43a0e524bc931f187fc69ef4c42828cf7d7f592f7f249f602b5a4ab0"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:6b613164cb8425e2f8db5849ffb84892e523bf6d26deb8f9bb76ae86181fa12b"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:96d4819e25bf3b685199b304a0029ce4a3caf98947ce8a066c9137cc78ad2c58"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b97737a3ffbea79eebb062eb0d67d72307195035332501722a9ca86bab9e3ab2"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4828190fb6c4bcb6ebc6331f01fe66ae838bb3bd58e753b59d4b22eb444b996c"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:03dbff8411206713185b8cebe31bc5c0eb544799a50c09035733716b386e61a4"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:51dfbd4d26172d31150d84c19bbe06c68ea4b7f11bbc7b3a5e146b367c311349"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = 
"sha256:0339a692de47084969500ee455e42c58e449461e0ec845a34a6a9b9bf7df7fb8"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:0cf13c77d710131d33e63626bd55ae7c0efb701ebdc2b3a7952b9b23a0412862"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:bbc505d1dc469ac12a0a064214879eac6294038d6b24ae9f71faae1448a9608d"}, + {file = "cryptography-45.0.4.tar.gz", hash = "sha256:7405ade85c83c37682c8fe65554759800a4a8c54b2d96e0f8ad114d31b808d57"}, ] [package.dependencies] cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""} [package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"] +docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs ; python_full_version >= \"3.8.0\"", "sphinx-rtd-theme (>=3.0.0) ; python_full_version >= \"3.8.0\""] docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] -nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2)"] -pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] +nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8.0\""] +pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] sdist = ["build (>=1.0.0)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi (>=2024)", "cryptography-vectors (==45.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test = ["certifi (>=2024)", "cryptography-vectors (==45.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test-randomorder = ["pytest-randomly"] [[package]] @@ -459,6 +474,7 @@ version = "0.6.7" description = "Easily serialize dataclasses to and from JSON." 
optional = false python-versions = "<4.0,>=3.7" +groups = ["main"] files = [ {file = "dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a"}, {file = "dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0"}, @@ -474,6 +490,7 @@ version = "0.3.9" description = "Distribution utilities" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, @@ -485,6 +502,7 @@ version = "1.9.0" description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, @@ -496,6 +514,7 @@ version = "0.15" description = "Parse Python docstrings in reST, Google and Numpydoc format" optional = false python-versions = ">=3.6,<4.0" +groups = ["main"] files = [ {file = "docstring_parser-0.15-py3-none-any.whl", hash = "sha256:d1679b86250d269d06a99670924d6bce45adc00b08069dae8c47d98e89b667a9"}, {file = "docstring_parser-0.15.tar.gz", hash = "sha256:48ddc093e8b1865899956fcc03b03e66bb7240c310fac5af81814580c55bf682"}, @@ -507,6 +526,7 @@ version = "2.14.1" description = "Emoji for Python" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "emoji-2.14.1-py3-none-any.whl", hash = "sha256:35a8a486c1460addb1499e3bf7929d3889b2e2841a57401903699fef595e942b"}, {file = "emoji-2.14.1.tar.gz", hash = "sha256:f8c50043d79a2c1410ebfae833ae1868d5941a67a6cd4d18377e2eb0bd79346b"}, @@ -521,6 +541,8 @@ version = "1.3.0" 
description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["main"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, @@ -538,6 +560,7 @@ version = "3.18.0" description = "A platform independent file lock." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"}, {file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"}, @@ -546,7 +569,7 @@ files = [ [package.extras] docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] -typing = ["typing-extensions (>=4.12.2)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] [[package]] name = "filetype" @@ -554,6 +577,7 @@ version = "1.2.0" description = "Infer file type and MIME type of any file/buffer. No external dependencies." 
optional = false python-versions = "*" +groups = ["main"] files = [ {file = "filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25"}, {file = "filetype-1.2.0.tar.gz", hash = "sha256:66b56cd6474bf41d8c54660347d37afcc3f7d1970648de365c102ef77548aadb"}, @@ -565,6 +589,7 @@ version = "2025.5.1" description = "File-system specification" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "fsspec-2025.5.1-py3-none-any.whl", hash = "sha256:24d3a2e663d5fc735ab256263c4075f374a174c3410c0b25e5bd1970bceaa462"}, {file = "fsspec-2025.5.1.tar.gz", hash = "sha256:2e55e47a540b91843b755e83ded97c6e897fa0942b11490113f09e9c443c2475"}, @@ -604,6 +629,7 @@ version = "0.16.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, @@ -611,19 +637,21 @@ files = [ [[package]] name = "hf-xet" -version = "1.1.3" +version = "1.1.5" description = "Fast transfer of large files with the Hugging Face Hub." 
optional = false python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\"" files = [ - {file = "hf_xet-1.1.3-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:c3b508b5f583a75641aebf732853deb058953370ce8184f5dabc49f803b0819b"}, - {file = "hf_xet-1.1.3-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:b788a61977fbe6b5186e66239e2a329a3f0b7e7ff50dad38984c0c74f44aeca1"}, - {file = "hf_xet-1.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd2da210856444a34aad8ada2fc12f70dabed7cc20f37e90754d1d9b43bc0534"}, - {file = "hf_xet-1.1.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8203f52827e3df65981984936654a5b390566336956f65765a8aa58c362bb841"}, - {file = "hf_xet-1.1.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:30c575a5306f8e6fda37edb866762140a435037365eba7a17ce7bd0bc0216a8b"}, - {file = "hf_xet-1.1.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7c1a6aa6abed1f696f8099aa9796ca04c9ee778a58728a115607de9cc4638ff1"}, - {file = "hf_xet-1.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:b578ae5ac9c056296bb0df9d018e597c8dc6390c5266f35b5c44696003cde9f3"}, - {file = "hf_xet-1.1.3.tar.gz", hash = "sha256:a5f09b1dd24e6ff6bcedb4b0ddab2d81824098bb002cf8b4ffa780545fa348c3"}, + {file = "hf_xet-1.1.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:f52c2fa3635b8c37c7764d8796dfa72706cc4eded19d638331161e82b0792e23"}, + {file = "hf_xet-1.1.5-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:9fa6e3ee5d61912c4a113e0708eaaef987047616465ac7aa30f7121a48fc1af8"}, + {file = "hf_xet-1.1.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc874b5c843e642f45fd85cda1ce599e123308ad2901ead23d3510a47ff506d1"}, + {file = "hf_xet-1.1.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dbba1660e5d810bd0ea77c511a99e9242d920790d0e63c0e4673ed36c4022d18"}, + {file = 
"hf_xet-1.1.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ab34c4c3104133c495785d5d8bba3b1efc99de52c02e759cf711a91fd39d3a14"}, + {file = "hf_xet-1.1.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:83088ecea236d5113de478acb2339f92c95b4fb0462acaa30621fac02f5a534a"}, + {file = "hf_xet-1.1.5-cp37-abi3-win_amd64.whl", hash = "sha256:73e167d9807d166596b4b2f0b585c6d5bd84a26dea32843665a8b58f6edba245"}, + {file = "hf_xet-1.1.5.tar.gz", hash = "sha256:69ebbcfd9ec44fdc2af73441619eeb06b94ee34511bbcf57cd423820090f5694"}, ] [package.extras] @@ -635,6 +663,7 @@ version = "1.0.9" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, @@ -656,6 +685,7 @@ version = "0.28.1" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, @@ -668,7 +698,7 @@ httpcore = "==1.*" idna = "*" [package.extras] -brotli = ["brotli", "brotlicffi"] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] @@ -676,24 +706,26 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "httpx-sse" -version = "0.4.0" +version = "0.4.1" description = "Consume Server-Sent Event (SSE) messages with HTTPX." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, - {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, + {file = "httpx_sse-0.4.1-py3-none-any.whl", hash = "sha256:cba42174344c3a5b06f255ce65b350880f962d99ead85e776f23c6618a377a37"}, + {file = "httpx_sse-0.4.1.tar.gz", hash = "sha256:8f44d34414bc7b21bf3602713005c5df4917884f76072479b21f68befa4ea26e"}, ] [[package]] name = "huggingface-hub" -version = "0.32.4" +version = "0.33.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" +groups = ["main"] files = [ - {file = "huggingface_hub-0.32.4-py3-none-any.whl", hash = "sha256:37abf8826b38d971f60d3625229221c36e53fe58060286db9baf619cfbf39767"}, - {file = "huggingface_hub-0.32.4.tar.gz", hash = "sha256:f61d45cd338736f59fb0e97550b74c24ee771bcc92c05ae0766b9116abe720be"}, + {file = "huggingface_hub-0.33.1-py3-none-any.whl", hash = "sha256:ec8d7444628210c0ba27e968e3c4c973032d44dcea59ca0d78ef3f612196f095"}, + {file = "huggingface_hub-0.33.1.tar.gz", hash = "sha256:589b634f979da3ea4b8bdb3d79f97f547840dc83715918daf0b64209c0844c7b"}, ] [package.dependencies] @@ -707,16 +739,16 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "libcst (==1.4.0)", "mypy (==1.15.0)", "mypy (>=1.14.1,<1.15.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", 
"typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "libcst (==1.4.0)", "mypy (==1.15.0) ; python_version >= \"3.9\"", "mypy (>=1.14.1,<1.15.0) ; python_version == \"3.8\"", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "libcst (==1.4.0)", "mypy (==1.15.0)", "mypy (>=1.14.1,<1.15.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "libcst (==1.4.0)", "mypy (==1.15.0) ; python_version >= \"3.9\"", "mypy (>=1.14.1,<1.15.0) ; python_version == \"3.8\"", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] hf-transfer = ["hf-transfer (>=0.1.4)"] hf-xet = ["hf-xet (>=1.1.2,<2.0.0)"] inference = ["aiohttp"] mcp = 
["aiohttp", "mcp (>=1.8.0)", "typer"] oauth = ["authlib (>=1.3.2)", "fastapi", "httpx", "itsdangerous"] -quality = ["libcst (==1.4.0)", "mypy (==1.15.0)", "mypy (>=1.14.1,<1.15.0)", "ruff (>=0.9.0)"] +quality = ["libcst (==1.4.0)", "mypy (==1.15.0) ; python_version >= \"3.9\"", "mypy (>=1.14.1,<1.15.0) ; python_version == \"3.8\"", "ruff (>=0.9.0)"] tensorflow = ["graphviz", "pydot", "tensorflow"] tensorflow-testing = ["keras (<3.0)", "tensorflow"] testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] @@ -729,6 +761,7 @@ version = "2.6.12" description = "File identification library for Python" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "identify-2.6.12-py2.py3-none-any.whl", hash = "sha256:ad9672d5a72e0d2ff7c5c8809b62dfa60458626352fb0eb7b55e69bdc45334a2"}, {file = "identify-2.6.12.tar.gz", hash = "sha256:d8de45749f1efb108badef65ee8386f0f7bb19a7f26185f74de6367bffbaf0e6"}, @@ -743,6 +776,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -757,6 +791,7 @@ version = "0.11.6" description = "High performance graph data structures and algorithms" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "igraph-0.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3f8b837181e8e87676be3873ce87cc92cc234efd58a2da2f6b4e050db150fcf4"}, {file = "igraph-0.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:245c4b7d7657849eff80416f5df4525c8fc44c74a981ee4d44f0ef2612c3bada"}, @@ -805,10 +840,10 @@ texttable = ">=1.6.2" [package.extras] cairo = ["cairocffi (>=1.2.0)"] doc = ["Sphinx (>=7.0.0)", "pydoctor (>=23.4.0)", "sphinx-gallery (>=0.14.0)", "sphinx-rtd-theme (>=1.3.0)"] -matplotlib = ["matplotlib (>=3.6.0)"] +matplotlib = ["matplotlib (>=3.6.0) ; platform_python_implementation != \"PyPy\""] plotly = ["plotly (>=5.3.0)"] plotting = ["cairocffi (>=1.2.0)"] -test = ["Pillow (>=9)", "cairocffi (>=1.2.0)", "matplotlib (>=3.6.0)", "networkx (>=2.5)", "numpy (>=1.19.0)", "pandas (>=1.1.0)", "plotly (>=5.3.0)", "pytest (>=7.0.1)", "pytest-timeout (>=2.1.0)", "scipy (>=1.5.0)"] +test = ["Pillow (>=9) ; platform_python_implementation != \"PyPy\"", "cairocffi (>=1.2.0)", "matplotlib (>=3.6.0) ; platform_python_implementation != \"PyPy\"", "networkx (>=2.5)", "numpy (>=1.19.0) ; platform_python_implementation != \"PyPy\"", "pandas (>=1.1.0) ; platform_python_implementation != \"PyPy\"", "plotly (>=5.3.0)", "pytest (>=7.0.1)", "pytest-timeout (>=2.1.0)", "scipy (>=1.5.0) ; platform_python_implementation != \"PyPy\""] test-musl = ["cairocffi (>=1.2.0)", "networkx (>=2.5)", "pytest (>=7.0.1)", "pytest-timeout (>=2.1.0)"] [[package]] @@ -817,6 +852,7 @@ version = "2.1.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, @@ -828,6 +864,7 @@ version = "3.1.6" description = "A very fast and expressive template engine." 
optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, @@ -845,6 +882,7 @@ version = "0.10.0" description = "Fast iterable JSON parser." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "jiter-0.10.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:cd2fb72b02478f06a900a5782de2ef47e0396b3e1f7d5aba30daeb1fce66f303"}, {file = "jiter-0.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:32bb468e3af278f095d3fa5b90314728a6916d89ba3d0ffb726dd9bf7367285e"}, @@ -931,6 +969,7 @@ version = "1.5.1" description = "Lightweight pipelining with Python functions" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "joblib-1.5.1-py3-none-any.whl", hash = "sha256:4719a31f054c7d766948dcd83e9613686b27114f190f717cec7eaa2084f8a74a"}, {file = "joblib-1.5.1.tar.gz", hash = "sha256:f4f86e351f39fe3d0d32a9f2c3d8af1ee4cec285aafcb27003dda5205576b444"}, @@ -942,6 +981,7 @@ version = "4.24.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "jsonschema-4.24.0-py3-none-any.whl", hash = "sha256:a462455f19f5faf404a7902952b6f0e3ce868f3ee09a359b05eca6673bd8412d"}, {file = "jsonschema-4.24.0.tar.gz", hash = "sha256:0b4e8069eb12aedfa881333004bccaec24ecef5a8a6a4b6df142b2cc9599d196"}, @@ -963,6 +1003,7 @@ version = "0.3.4" description = "JSONSchema Spec with object-oriented paths" optional = false python-versions = "<4.0.0,>=3.8.0" +groups = ["main"] files = [ {file = "jsonschema_path-0.3.4-py3-none-any.whl", hash = "sha256:f502191fdc2b22050f9a81c9237be9d27145b9001c55842bece5e94e382e52f8"}, {file = "jsonschema_path-0.3.4.tar.gz", hash = 
"sha256:8365356039f16cc65fddffafda5f58766e34bebab7d6d105616ab52bc4297001"}, @@ -980,6 +1021,7 @@ version = "2025.4.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af"}, {file = "jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608"}, @@ -994,6 +1036,7 @@ version = "1.0.9" description = "Language detection library ported from Google's language-detection." optional = false python-versions = "*" +groups = ["main"] files = [ {file = "langdetect-1.0.9-py2-none-any.whl", hash = "sha256:7cbc0746252f19e76f77c0b1690aadf01963be835ef0cd4b56dddf2a8f1dfc2a"}, {file = "langdetect-1.0.9.tar.gz", hash = "sha256:cbc1fef89f8d062739774bd51eda3da3274006b3661d199c2655f6b3f6d605a0"}, @@ -1008,6 +1051,7 @@ version = "1.11.0" description = "A fast and thorough lazy object proxy." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "lazy_object_proxy-1.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:132bc8a34f2f2d662a851acfd1b93df769992ed1b81e2b1fda7db3e73b0d5a18"}, {file = "lazy_object_proxy-1.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:01261a3afd8621a1accb5682df2593dc7ec7d21d38f411011a5712dcd418fbed"}, @@ -1031,6 +1075,7 @@ version = "5.4.0" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "lxml-5.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e7bc6df34d42322c5289e37e9971d6ed114e3776b45fa879f734bded9d1fea9c"}, {file = "lxml-5.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6854f8bd8a1536f8a1d9a3655e6354faa6406621cf857dc27b681b69860645c7"}, @@ -1179,6 +1224,7 @@ version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, @@ -1249,6 +1295,7 @@ version = "3.26.1" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "marshmallow-3.26.1-py3-none-any.whl", hash = "sha256:3350409f20a70a7e4e11a27661187b77cdcaeb20abca41c1454fe33636bea09c"}, {file = "marshmallow-3.26.1.tar.gz", hash = "sha256:e6d8affb6cb61d39d26402096dc0aee12d5a26d490a121f118d2e81dc0719dc6"}, @@ -1264,13 +1311,14 @@ tests = ["pytest", "simplejson"] [[package]] name = "mcp" -version = "1.9.2" +version = "1.9.4" description = "Model Context Protocol SDK" optional = false python-versions = ">=3.10" +groups = ["main"] files = [ - {file = "mcp-1.9.2-py3-none-any.whl", hash = "sha256:bc29f7fd67d157fef378f89a4210384f5fecf1168d0feb12d22929818723f978"}, - {file = "mcp-1.9.2.tar.gz", hash = "sha256:3c7651c053d635fd235990a12e84509fe32780cd359a5bbef352e20d4d963c05"}, + {file = "mcp-1.9.4-py3-none-any.whl", hash = "sha256:7fcf36b62936adb8e63f89346bccca1268eeca9bf6dfb562ee10b1dfbda9dac0"}, + {file = "mcp-1.9.4.tar.gz", hash = 
"sha256:cfb0bcd1a9535b42edaef89947b9e18a8feb49362e1cc059d6e7fc636f2cb09f"}, ] [package.dependencies] @@ -1295,6 +1343,7 @@ version = "1.3.0" description = "Python library for arbitrary-precision floating-point arithmetic" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, @@ -1303,7 +1352,7 @@ files = [ [package.extras] develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] docs = ["sphinx"] -gmpy = ["gmpy2 (>=2.1.0a4)"] +gmpy = ["gmpy2 (>=2.1.0a4) ; platform_python_implementation != \"PyPy\""] tests = ["pytest (>=4.6)"] [[package]] @@ -1312,6 +1361,7 @@ version = "1.1.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, @@ -1323,6 +1373,7 @@ version = "5.23.0" description = "Neo4j Bolt driver for Python" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "neo4j-5.23.0-py3-none-any.whl", hash = "sha256:5d8d2f45227c12d6ba564720cbc3e2f57aac472e4fa14fe69270e4f952791020"}, {file = "neo4j-5.23.0.tar.gz", hash = "sha256:26b06dac3a4b93d882a61714c5ca8d06fe68f697cbdfe113ab840d651a2d46a2"}, @@ -1342,6 +1393,7 @@ version = "1.6.0" description = "Patch asyncio to allow nested event loops" optional = false python-versions = ">=3.5" +groups = ["main"] files = [ {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, {file = 
"nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, @@ -1353,6 +1405,8 @@ version = "3.4.2" description = "Python package for creating and manipulating graphs and networks" optional = false python-versions = ">=3.10" +groups = ["main"] +markers = "python_version < \"3.11\"" files = [ {file = "networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f"}, {file = "networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1"}, @@ -1366,12 +1420,35 @@ example = ["cairocffi (>=1.7)", "contextily (>=1.6)", "igraph (>=0.11)", "momepy extra = ["lxml (>=4.6)", "pydot (>=3.0.1)", "pygraphviz (>=1.14)", "sympy (>=1.10)"] test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] +[[package]] +name = "networkx" +version = "3.5" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.11" +groups = ["main"] +markers = "python_version == \"3.11\"" +files = [ + {file = "networkx-3.5-py3-none-any.whl", hash = "sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec"}, + {file = "networkx-3.5.tar.gz", hash = "sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037"}, +] + +[package.extras] +default = ["matplotlib (>=3.8)", "numpy (>=1.25)", "pandas (>=2.0)", "scipy (>=1.11.2)"] +developer = ["mypy (>=1.15)", "pre-commit (>=4.1)"] +doc = ["intersphinx-registry", "myst-nb (>=1.1)", "numpydoc (>=1.8.0)", "pillow (>=10)", "pydata-sphinx-theme (>=0.16)", "sphinx (>=8.0)", "sphinx-gallery (>=0.18)", "texext (>=0.6.7)"] +example = ["cairocffi (>=1.7)", "contextily (>=1.6)", "igraph (>=0.11)", "momepy (>=0.7.2)", "osmnx (>=2.0.0)", "scikit-learn (>=1.5)", "seaborn (>=0.13)"] +extra = ["lxml (>=4.6)", "pydot (>=3.0.1)", "pygraphviz (>=1.14)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)", "pytest-xdist (>=3.0)"] +test-extras = 
["pytest-mpl", "pytest-randomly"] + [[package]] name = "nltk" version = "3.9.1" description = "Natural Language Toolkit" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "nltk-3.9.1-py3-none-any.whl", hash = "sha256:4fa26829c5b00715afe3061398a8989dc643b92ce7dd93fb4585a70930d168a1"}, {file = "nltk-3.9.1.tar.gz", hash = "sha256:87d127bd3de4bd89a4f81265e5fa59cb1b199b27440175370f7417d2bc7ae868"}, @@ -1397,6 +1474,7 @@ version = "1.9.1" description = "Node.js virtual environment builder" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main"] files = [ {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, @@ -1408,6 +1486,8 @@ version = "2.2.6" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.10" +groups = ["main"] +markers = "python_version < \"3.11\"" files = [ {file = "numpy-2.2.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb"}, {file = "numpy-2.2.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90"}, @@ -1466,12 +1546,76 @@ files = [ {file = "numpy-2.2.6.tar.gz", hash = "sha256:e29554e2bef54a90aa5cc07da6ce955accb83f21ab5de01a62c8478897b264fd"}, ] +[[package]] +name = "numpy" +version = "2.3.1" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.11" +groups = ["main"] +markers = "python_version == \"3.11\"" +files = [ + {file = "numpy-2.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6ea9e48336a402551f52cd8f593343699003d2353daa4b72ce8d34f66b722070"}, + {file = "numpy-2.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:5ccb7336eaf0e77c1635b232c141846493a588ec9ea777a7c24d7166bb8533ae"}, + {file = "numpy-2.3.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:0bb3a4a61e1d327e035275d2a993c96fa786e4913aa089843e6a2d9dd205c66a"}, + {file = "numpy-2.3.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:e344eb79dab01f1e838ebb67aab09965fb271d6da6b00adda26328ac27d4a66e"}, + {file = "numpy-2.3.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:467db865b392168ceb1ef1ffa6f5a86e62468c43e0cfb4ab6da667ede10e58db"}, + {file = "numpy-2.3.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:afed2ce4a84f6b0fc6c1ce734ff368cbf5a5e24e8954a338f3bdffa0718adffb"}, + {file = "numpy-2.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0025048b3c1557a20bc80d06fdeb8cc7fc193721484cca82b2cfa072fec71a93"}, + {file = "numpy-2.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a5ee121b60aa509679b682819c602579e1df14a5b07fe95671c8849aad8f2115"}, + {file = "numpy-2.3.1-cp311-cp311-win32.whl", hash = "sha256:a8b740f5579ae4585831b3cf0e3b0425c667274f82a484866d2adf9570539369"}, + {file = "numpy-2.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:d4580adadc53311b163444f877e0789f1c8861e2698f6b2a4ca852fda154f3ff"}, + {file = "numpy-2.3.1-cp311-cp311-win_arm64.whl", hash = "sha256:ec0bdafa906f95adc9a0c6f26a4871fa753f25caaa0e032578a30457bff0af6a"}, + {file = "numpy-2.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2959d8f268f3d8ee402b04a9ec4bb7604555aeacf78b360dc4ec27f1d508177d"}, + {file = "numpy-2.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:762e0c0c6b56bdedfef9a8e1d4538556438288c4276901ea008ae44091954e29"}, + {file = "numpy-2.3.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:867ef172a0976aaa1f1d1b63cf2090de8b636a7674607d514505fb7276ab08fc"}, + {file = "numpy-2.3.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:4e602e1b8682c2b833af89ba641ad4176053aaa50f5cacda1a27004352dde943"}, + {file = "numpy-2.3.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = 
"sha256:8e333040d069eba1652fb08962ec5b76af7f2c7bce1df7e1418c8055cf776f25"}, + {file = "numpy-2.3.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:e7cbf5a5eafd8d230a3ce356d892512185230e4781a361229bd902ff403bc660"}, + {file = "numpy-2.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f1b8f26d1086835f442286c1d9b64bb3974b0b1e41bb105358fd07d20872952"}, + {file = "numpy-2.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ee8340cb48c9b7a5899d1149eece41ca535513a9698098edbade2a8e7a84da77"}, + {file = "numpy-2.3.1-cp312-cp312-win32.whl", hash = "sha256:e772dda20a6002ef7061713dc1e2585bc1b534e7909b2030b5a46dae8ff077ab"}, + {file = "numpy-2.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:cfecc7822543abdea6de08758091da655ea2210b8ffa1faf116b940693d3df76"}, + {file = "numpy-2.3.1-cp312-cp312-win_arm64.whl", hash = "sha256:7be91b2239af2658653c5bb6f1b8bccafaf08226a258caf78ce44710a0160d30"}, + {file = "numpy-2.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:25a1992b0a3fdcdaec9f552ef10d8103186f5397ab45e2d25f8ac51b1a6b97e8"}, + {file = "numpy-2.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7dea630156d39b02a63c18f508f85010230409db5b2927ba59c8ba4ab3e8272e"}, + {file = "numpy-2.3.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:bada6058dd886061f10ea15f230ccf7dfff40572e99fef440a4a857c8728c9c0"}, + {file = "numpy-2.3.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:a894f3816eb17b29e4783e5873f92faf55b710c2519e5c351767c51f79d8526d"}, + {file = "numpy-2.3.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:18703df6c4a4fee55fd3d6e5a253d01c5d33a295409b03fda0c86b3ca2ff41a1"}, + {file = "numpy-2.3.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:5902660491bd7a48b2ec16c23ccb9124b8abfd9583c5fdfa123fe6b421e03de1"}, + {file = "numpy-2.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:36890eb9e9d2081137bd78d29050ba63b8dab95dff7912eadf1185e80074b2a0"}, + {file = "numpy-2.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:a780033466159c2270531e2b8ac063704592a0bc62ec4a1b991c7c40705eb0e8"}, + {file = "numpy-2.3.1-cp313-cp313-win32.whl", hash = "sha256:39bff12c076812595c3a306f22bfe49919c5513aa1e0e70fac756a0be7c2a2b8"}, + {file = "numpy-2.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:8d5ee6eec45f08ce507a6570e06f2f879b374a552087a4179ea7838edbcbfa42"}, + {file = "numpy-2.3.1-cp313-cp313-win_arm64.whl", hash = "sha256:0c4d9e0a8368db90f93bd192bfa771ace63137c3488d198ee21dfb8e7771916e"}, + {file = "numpy-2.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:b0b5397374f32ec0649dd98c652a1798192042e715df918c20672c62fb52d4b8"}, + {file = "numpy-2.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c5bdf2015ccfcee8253fb8be695516ac4457c743473a43290fd36eba6a1777eb"}, + {file = "numpy-2.3.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d70f20df7f08b90a2062c1f07737dd340adccf2068d0f1b9b3d56e2038979fee"}, + {file = "numpy-2.3.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:2fb86b7e58f9ac50e1e9dd1290154107e47d1eef23a0ae9145ded06ea606f992"}, + {file = "numpy-2.3.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:23ab05b2d241f76cb883ce8b9a93a680752fbfcbd51c50eff0b88b979e471d8c"}, + {file = "numpy-2.3.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:ce2ce9e5de4703a673e705183f64fd5da5bf36e7beddcb63a25ee2286e71ca48"}, + {file = "numpy-2.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c4913079974eeb5c16ccfd2b1f09354b8fed7e0d6f2cab933104a09a6419b1ee"}, + {file = "numpy-2.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:010ce9b4f00d5c036053ca684c77441f2f2c934fd23bee058b4d6f196efd8280"}, + {file = "numpy-2.3.1-cp313-cp313t-win32.whl", hash = "sha256:6269b9edfe32912584ec496d91b00b6d34282ca1d07eb10e82dfc780907d6c2e"}, + {file = "numpy-2.3.1-cp313-cp313t-win_amd64.whl", hash = "sha256:2a809637460e88a113e186e87f228d74ae2852a2e0c44de275263376f17b5bdc"}, + {file = "numpy-2.3.1-cp313-cp313t-win_arm64.whl", hash = 
"sha256:eccb9a159db9aed60800187bc47a6d3451553f0e1b08b068d8b277ddfbb9b244"}, + {file = "numpy-2.3.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ad506d4b09e684394c42c966ec1527f6ebc25da7f4da4b1b056606ffe446b8a3"}, + {file = "numpy-2.3.1-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:ebb8603d45bc86bbd5edb0d63e52c5fd9e7945d3a503b77e486bd88dde67a19b"}, + {file = "numpy-2.3.1-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:15aa4c392ac396e2ad3d0a2680c0f0dee420f9fed14eef09bdb9450ee6dcb7b7"}, + {file = "numpy-2.3.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c6e0bf9d1a2f50d2b65a7cf56db37c095af17b59f6c132396f7c6d5dd76484df"}, + {file = "numpy-2.3.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:eabd7e8740d494ce2b4ea0ff05afa1b7b291e978c0ae075487c51e8bd93c0c68"}, + {file = "numpy-2.3.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:e610832418a2bc09d974cc9fecebfa51e9532d6190223bc5ef6a7402ebf3b5cb"}, + {file = "numpy-2.3.1.tar.gz", hash = "sha256:1ec9ae20a4226da374362cca3c62cd753faf2f951440b0e3b98e93c235441d2b"}, +] + [[package]] name = "nvidia-cublas-cu12" version = "12.6.4.1" description = "CUBLAS native runtime libraries" optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cublas_cu12-12.6.4.1-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:08ed2686e9875d01b58e3cb379c6896df8e76c75e0d4a7f7dace3d7b6d9ef8eb"}, {file = "nvidia_cublas_cu12-12.6.4.1-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:235f728d6e2a409eddf1df58d5b0921cf80cfa9e72b9f2775ccb7b4a87984668"}, @@ -1484,6 +1628,8 @@ version = "12.6.80" description = "CUDA profiling tools runtime libs." 
optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:166ee35a3ff1587f2490364f90eeeb8da06cd867bd5b701bf7f9a02b78bc63fc"}, {file = "nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_aarch64.whl", hash = "sha256:358b4a1d35370353d52e12f0a7d1769fc01ff74a191689d3870b2123156184c4"}, @@ -1498,6 +1644,8 @@ version = "12.6.77" description = "NVRTC native runtime libraries" optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cuda_nvrtc_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5847f1d6e5b757f1d2b3991a01082a44aad6f10ab3c5c0213fa3e25bddc25a13"}, {file = "nvidia_cuda_nvrtc_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:35b0cc6ee3a9636d5409133e79273ce1f3fd087abb0532d2d2e8fff1fe9efc53"}, @@ -1510,6 +1658,8 @@ version = "12.6.77" description = "CUDA Runtime native Libraries" optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6116fad3e049e04791c0256a9778c16237837c08b27ed8c8401e2e45de8d60cd"}, {file = "nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d461264ecb429c84c8879a7153499ddc7b19b5f8d84c204307491989a365588e"}, @@ -1524,6 +1674,8 @@ version = "9.5.1.17" description = "cuDNN runtime libraries" optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cudnn_cu12-9.5.1.17-py3-none-manylinux_2_28_aarch64.whl", hash = 
"sha256:9fd4584468533c61873e5fda8ca41bac3a38bcb2d12350830c69b0a96a7e4def"}, {file = "nvidia_cudnn_cu12-9.5.1.17-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:30ac3869f6db17d170e0e556dd6cc5eee02647abc31ca856634d5a40f82c15b2"}, @@ -1539,6 +1691,8 @@ version = "11.3.0.4" description = "CUFFT native runtime libraries" optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d16079550df460376455cba121db6564089176d9bac9e4f360493ca4741b22a6"}, {file = "nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8510990de9f96c803a051822618d42bf6cb8f069ff3f48d93a8486efdacb48fb"}, @@ -1556,6 +1710,8 @@ version = "1.11.1.6" description = "cuFile GPUDirect libraries" optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cufile_cu12-1.11.1.6-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc23469d1c7e52ce6c1d55253273d32c565dd22068647f3aa59b3c6b005bf159"}, {file = "nvidia_cufile_cu12-1.11.1.6-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:8f57a0051dcf2543f6dc2b98a98cb2719c37d3cee1baba8965d57f3bbc90d4db"}, @@ -1567,6 +1723,8 @@ version = "10.3.7.77" description = "CURAND native runtime libraries" optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_curand_cu12-10.3.7.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:6e82df077060ea28e37f48a3ec442a8f47690c7499bff392a5938614b56c98d8"}, {file = "nvidia_curand_cu12-10.3.7.77-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a42cd1344297f70b9e39a1e4f467a4e1c10f1da54ff7a85c12197f6c652c8bdf"}, @@ -1581,6 +1739,8 @@ version = "11.7.1.2" 
description = "CUDA solver native runtime libraries" optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0ce237ef60acde1efc457335a2ddadfd7610b892d94efee7b776c64bb1cac9e0"}, {file = "nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e9e49843a7707e42022babb9bcfa33c29857a93b88020c4e4434656a655b698c"}, @@ -1600,6 +1760,8 @@ version = "12.5.4.2" description = "CUSPARSE native runtime libraries" optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d25b62fb18751758fe3c93a4a08eff08effedfe4edf1c6bb5afd0890fe88f887"}, {file = "nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7aa32fa5470cf754f72d1116c7cbc300b4e638d3ae5304cfa4a638a5b87161b1"}, @@ -1617,6 +1779,8 @@ version = "0.6.3" description = "NVIDIA cuSPARSELt" optional = false python-versions = "*" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cusparselt_cu12-0.6.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8371549623ba601a06322af2133c4a44350575f5a3108fb75f3ef20b822ad5f1"}, {file = "nvidia_cusparselt_cu12-0.6.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:e5c8a26c36445dd2e6812f1177978a24e2d37cacce7e090f297a688d1ec44f46"}, @@ -1629,6 +1793,8 @@ version = "2.26.2" description = "NVIDIA Collective Communication Library (NCCL) Runtime" optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = 
"nvidia_nccl_cu12-2.26.2-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5c196e95e832ad30fbbb50381eb3cbd1fadd5675e587a548563993609af19522"}, {file = "nvidia_nccl_cu12-2.26.2-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:694cf3879a206553cc9d7dbda76b13efaf610fdb70a50cba303de1b0d1530ac6"}, @@ -1640,6 +1806,8 @@ version = "12.6.85" description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:eedc36df9e88b682efe4309aa16b5b4e78c2407eac59e8c10a6a47535164369a"}, {file = "nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf4eaa7d4b6b543ffd69d6abfb11efdeb2db48270d94dfd3a452c24150829e41"}, @@ -1652,6 +1820,8 @@ version = "12.6.77" description = "NVIDIA Tools Extension" optional = false python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f44f8d86bb7d5629988d61c8d3ae61dddb2015dee142740536bc7481b022fe4b"}, {file = "nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:adcaabb9d436c9761fca2b13959a2d237c5f9fd406c8e4b723c695409ff88059"}, @@ -1662,13 +1832,14 @@ files = [ [[package]] name = "oauthlib" -version = "3.2.2" +version = "3.3.1" description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, - {file = "oauthlib-3.2.2.tar.gz", hash = 
"sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, + {file = "oauthlib-3.3.1-py3-none-any.whl", hash = "sha256:88119c938d2b8fb88561af5f6ee0eec8cc8d552b7bb1f712743136eb7523b7a1"}, + {file = "oauthlib-3.3.1.tar.gz", hash = "sha256:0f0f8aa759826a193cf66c12ea1af1637f87b9b4622d46e866952bb022e538c9"}, ] [package.extras] @@ -1678,13 +1849,14 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "openai" -version = "1.84.0" +version = "1.91.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "openai-1.84.0-py3-none-any.whl", hash = "sha256:7ec4436c3c933d68dc0f5a0cef0cb3dbc0864a54d62bddaf2ed5f3d521844711"}, - {file = "openai-1.84.0.tar.gz", hash = "sha256:4caa43bdab262cc75680ce1a2322cfc01626204074f7e8d9939ab372acf61698"}, + {file = "openai-1.91.0-py3-none-any.whl", hash = "sha256:207f87aa3bc49365e014fac2f7e291b99929f4fe126c4654143440e0ad446a5f"}, + {file = "openai-1.91.0.tar.gz", hash = "sha256:d6b07730d2f7c6745d0991997c16f85cddfc90ddcde8d569c862c30716b9fc90"}, ] [package.dependencies] @@ -1698,6 +1870,7 @@ tqdm = ">4" typing-extensions = ">=4.11,<5" [package.extras] +aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.6)"] datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] realtime = ["websockets (>=13,<16)"] voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"] @@ -1708,6 +1881,7 @@ version = "0.6.3" description = "OpenAPI schema validation for Python" optional = false python-versions = "<4.0.0,>=3.8.0" +groups = ["main"] files = [ {file = "openapi_schema_validator-0.6.3-py3-none-any.whl", hash = "sha256:f3b9870f4e556b5a62a1c39da72a6b4b16f3ad9c73dc80084b1b11e74ba148a3"}, {file = "openapi_schema_validator-0.6.3.tar.gz", hash = "sha256:f37bace4fc2a5d96692f4f8b31dc0f8d7400fd04f3a937798eaf880d425de6ee"}, @@ -1724,6 +1898,7 @@ version = "0.7.1" description = "OpenAPI 2.0 (aka Swagger) and OpenAPI 3 spec 
validator" optional = false python-versions = ">=3.8.0,<4.0.0" +groups = ["main"] files = [ {file = "openapi_spec_validator-0.7.1-py3-none-any.whl", hash = "sha256:3c81825043f24ccbcd2f4b149b11e8231abce5ba84f37065e14ec947d8f4e959"}, {file = "openapi_spec_validator-0.7.1.tar.gz", hash = "sha256:8577b85a8268685da6f8aa30990b83b7960d4d1117e901d451b5d572605e5ec7"}, @@ -1741,6 +1916,7 @@ version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, @@ -1752,6 +1928,7 @@ version = "2.2.2" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "pandas-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce"}, {file = "pandas-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238"}, @@ -1824,6 +2001,7 @@ version = "0.4.4" description = "Object-oriented paths" optional = false python-versions = "<4.0.0,>=3.7.0" +groups = ["main"] files = [ {file = "pathable-0.4.4-py3-none-any.whl", hash = "sha256:5ae9e94793b6ef5a4cbe0a7ce9dbbefc1eec38df253763fd0aeeacf2762dbbc2"}, {file = "pathable-0.4.4.tar.gz", hash = "sha256:6905a3cd17804edfac7875b5f6c9142a218c7caef78693c2dbbbfbac186d88b2"}, @@ -1835,6 +2013,7 @@ version = "10.3.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, {file = 
"pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, @@ -1912,7 +2091,7 @@ docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline fpx = ["olefile"] mic = ["olefile"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] -typing = ["typing-extensions"] +typing = ["typing-extensions ; python_version < \"3.10\""] xmp = ["defusedxml"] [[package]] @@ -1921,6 +2100,7 @@ version = "4.3.8" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, @@ -1937,6 +2117,7 @@ version = "1.6.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, @@ -1952,6 +2133,7 @@ version = "23.6.21.0" description = "Resolving Swagger/OpenAPI 2.0 and 3.0.0 Parser" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "prance-23.6.21.0-py3-none-any.whl", hash = "sha256:6a4276fa07ed9f22feda4331097d7503c4adc3097e46ffae97425f2c1026bd9f"}, {file = "prance-23.6.21.0.tar.gz", hash = "sha256:d8c15f8ac34019751cc4945f866d8d964d7888016d10de3592e339567177cabe"}, @@ -1978,6 +2160,7 @@ version = "3.7.1" description = "A framework for managing and maintaining multi-language pre-commit hooks." 
optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "pre_commit-3.7.1-py2.py3-none-any.whl", hash = "sha256:fae36fd1d7ad7d6a5a1c0b0d5adb2ed1a3bda5a21bf6c3e5372073d7a11cd4c5"}, {file = "pre_commit-3.7.1.tar.gz", hash = "sha256:8ca3ad567bc78a4972a3f1a477e94a79d4597e8140a6e0b651c5e33899c3654a"}, @@ -1996,6 +2179,7 @@ version = "5.9.8" description = "Cross-platform lib for process and system monitoring in Python." optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +groups = ["main"] files = [ {file = "psutil-5.9.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8"}, {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73"}, @@ -2016,7 +2200,7 @@ files = [ ] [package.extras] -test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] +test = ["enum34 ; python_version <= \"3.4\"", "ipaddress ; python_version < \"3.0\"", "mock ; python_version < \"3.0\"", "pywin32 ; sys_platform == \"win32\"", "wmi ; sys_platform == \"win32\""] [[package]] name = "pycparser" @@ -2024,6 +2208,7 @@ version = "2.22" description = "C parser in Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, @@ -2031,13 +2216,14 @@ files = [ [[package]] name = "pydantic" -version = "2.11.5" +version = "2.11.7" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "pydantic-2.11.5-py3-none-any.whl", hash = "sha256:f9c26ba06f9747749ca1e5c94d6a85cb84254577553c8785576fd38fa64dc0f7"}, - {file = "pydantic-2.11.5.tar.gz", hash = 
"sha256:7f853db3d0ce78ce8bbb148c401c2cdd6431b3473c0cdff2755c7690952a7b7a"}, + {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, + {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, ] [package.dependencies] @@ -2048,7 +2234,7 @@ typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" @@ -2056,6 +2242,7 @@ version = "2.33.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, @@ -2163,13 +2350,14 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pydantic-settings" -version = "2.9.1" +version = "2.10.1" description = "Settings management using Pydantic" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "pydantic_settings-2.9.1-py3-none-any.whl", hash = "sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef"}, - {file = "pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268"}, + {file = "pydantic_settings-2.10.1-py3-none-any.whl", hash = "sha256:a60952460b99cf661dc25c29c0ef171721f98bfcb52ef8d9ea4c943d7c8cc796"}, + {file = "pydantic_settings-2.10.1.tar.gz", hash = "sha256:06f0062169818d0f5524420a360d632d5857b83cffd4d42fe29597807a1614ee"}, ] [package.dependencies] @@ -2186,13 +2374,14 @@ yaml = ["pyyaml (>=6.0.1)"] [[package]] name = "pypdf" 
-version = "5.6.0" +version = "5.6.1" description = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "pypdf-5.6.0-py3-none-any.whl", hash = "sha256:ca6bf446bfb0a2d8d71d6d6bb860798d864c36a29b3d9ae8d7fc7958c59f88e7"}, - {file = "pypdf-5.6.0.tar.gz", hash = "sha256:a4b6538b77fc796622000db7127e4e58039ec5e6afd292f8e9bf42e2e985a749"}, + {file = "pypdf-5.6.1-py3-none-any.whl", hash = "sha256:ff09d03d37addbc40f75db3624997a660ff5fe41c61e7ae4db6828dc3f581e4d"}, + {file = "pypdf-5.6.1.tar.gz", hash = "sha256:dde36cd67afe3afd733a562a0dd08a3c1dcdf01fe01de13785291319c8a883ff"}, ] [package.dependencies] @@ -2212,6 +2401,7 @@ version = "8.2.0" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pytest-8.2.0-py3-none-any.whl", hash = "sha256:1733f0620f6cda4095bbf0d9ff8022486e91892245bb9e7d5542c018f612f233"}, {file = "pytest-8.2.0.tar.gz", hash = "sha256:d507d4482197eac0ba2bae2e9babf0672eb333017bcedaa5fb1a3d42c1174b3f"}, @@ -2234,6 +2424,7 @@ version = "0.23.6" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pytest-asyncio-0.23.6.tar.gz", hash = "sha256:ffe523a89c1c222598c76856e76852b787504ddb72dd5d9b6617ffa8aa2cde5f"}, {file = "pytest_asyncio-0.23.6-py3-none-any.whl", hash = "sha256:68516fdd1018ac57b846c9846b954f0393b26f094764a28c955eabb0536a4e8a"}, @@ -2252,6 +2443,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = 
"sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -2262,13 +2454,14 @@ six = ">=1.5" [[package]] name = "python-dotenv" -version = "1.1.0" +version = "1.1.1" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"}, - {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"}, + {file = "python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc"}, + {file = "python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab"}, ] [package.extras] @@ -2280,6 +2473,7 @@ version = "2025.2.18" description = "ISO 639 language codes, names, and other associated information" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "python_iso639-2025.2.18-py3-none-any.whl", hash = "sha256:b2d471c37483a26f19248458b20e7bd96492e15368b01053b540126bcc23152f"}, {file = "python_iso639-2025.2.18.tar.gz", hash = "sha256:34e31e8e76eb3fc839629e257b12bcfd957c6edcbd486bbf66ba5185d1f566e8"}, @@ -2294,6 +2488,7 @@ version = "0.4.27" description = "File type identification using libmagic" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["main"] files = [ {file = "python-magic-0.4.27.tar.gz", hash = "sha256:c1ba14b08e4a5f5c31a302b7721239695b2f0f058d125bd5ce1ee36b9d9d3c3b"}, {file = "python_magic-0.4.27-py2.py3-none-any.whl", hash = "sha256:c212960ad306f700aa0d01e5d7a325d20548ff97eb9920dcd29513174f0294d3"}, @@ -2305,6 +2500,7 @@ version = "0.0.20" description = "A streaming multipart parser for Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = 
"python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104"}, {file = "python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13"}, @@ -2316,6 +2512,7 @@ version = "2025.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, @@ -2327,6 +2524,7 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -2389,6 +2587,7 @@ version = "3.13.0" description = "rapid fuzzy string matching" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "rapidfuzz-3.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:aafc42a1dc5e1beeba52cd83baa41372228d6d8266f6d803c16dbabbcc156255"}, {file = "rapidfuzz-3.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:85c9a131a44a95f9cac2eb6e65531db014e09d89c4f18c7b1fa54979cb9ff1f3"}, @@ -2495,6 +2694,7 @@ version = "0.36.2" description = "JSON Referencing + Python" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"}, {file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"}, @@ -2511,6 +2711,7 @@ version = 
"2024.11.6" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, @@ -2610,18 +2811,19 @@ files = [ [[package]] name = "requests" -version = "2.32.3" +version = "2.32.4" description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, + {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, + {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, ] [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" +charset_normalizer = ">=2,<4" idna = ">=2.5,<4" urllib3 = ">=1.21.1,<3" @@ -2635,6 +2837,7 @@ version = "2.0.0" description = "OAuthlib authentication support for Requests." 
optional = false python-versions = ">=3.4" +groups = ["main"] files = [ {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"}, {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"}, @@ -2653,6 +2856,7 @@ version = "1.0.0" description = "A utility belt for advanced users of python-requests" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["main"] files = [ {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, @@ -2667,6 +2871,7 @@ version = "0.1.4" description = "A pure python RFC3339 validator" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["main"] files = [ {file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"}, {file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"}, @@ -2681,6 +2886,7 @@ version = "0.25.1" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "rpds_py-0.25.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f4ad628b5174d5315761b67f212774a32f5bad5e61396d38108bd801c0a8f5d9"}, {file = "rpds_py-0.25.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c742af695f7525e559c16f1562cf2323db0e3f0fbdcabdf6865b095256b2d40"}, @@ -2803,13 +3009,14 @@ files = [ [[package]] name = "ruamel-yaml" -version = "0.18.12" +version = "0.18.14" description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and 
map key order" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "ruamel.yaml-0.18.12-py3-none-any.whl", hash = "sha256:790ba4c48b6a6e6b12b532a7308779eb12d2aaab3a80fdb8389216f28ea2b287"}, - {file = "ruamel.yaml-0.18.12.tar.gz", hash = "sha256:5a38fd5ce39d223bebb9e3a6779e86b9427a03fb0bf9f270060f8b149cffe5e2"}, + {file = "ruamel.yaml-0.18.14-py3-none-any.whl", hash = "sha256:710ff198bb53da66718c7db27eec4fbcc9aa6ca7204e4c1df2f282b6fe5eb6b2"}, + {file = "ruamel.yaml-0.18.14.tar.gz", hash = "sha256:7227b76aaec364df15936730efbf7d72b30c0b79b1d578bbb8e3dcb2d81f52b7"}, ] [package.dependencies] @@ -2825,6 +3032,8 @@ version = "0.2.12" description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" optional = false python-versions = ">=3.9" +groups = ["main"] +markers = "platform_python_implementation == \"CPython\"" files = [ {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:11f891336688faf5156a36293a9c362bdc7c88f03a8a027c2c1d8e0bcde998e5"}, {file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969"}, @@ -2880,6 +3089,7 @@ version = "0.5.3" description = "" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "safetensors-0.5.3-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bd20eb133db8ed15b40110b7c00c6df51655a2998132193de2f75f72d99c7073"}, {file = "safetensors-0.5.3-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:21d01c14ff6c415c485616b8b0bf961c46b3b343ca59110d38d744e577f9cce7"}, @@ -2913,57 +3123,53 @@ torch = ["safetensors[numpy]", "torch (>=1.10)"] [[package]] name = "scikit-learn" -version = "1.6.1" +version = "1.7.0" description = "A set of python modules for machine learning and data mining" optional = false -python-versions = ">=3.9" -files = [ - {file = "scikit_learn-1.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:d056391530ccd1e501056160e3c9673b4da4805eb67eb2bdf4e983e1f9c9204e"}, - {file = "scikit_learn-1.6.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:0c8d036eb937dbb568c6242fa598d551d88fb4399c0344d95c001980ec1c7d36"}, - {file = "scikit_learn-1.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8634c4bd21a2a813e0a7e3900464e6d593162a29dd35d25bdf0103b3fce60ed5"}, - {file = "scikit_learn-1.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:775da975a471c4f6f467725dff0ced5c7ac7bda5e9316b260225b48475279a1b"}, - {file = "scikit_learn-1.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:8a600c31592bd7dab31e1c61b9bbd6dea1b3433e67d264d17ce1017dbdce8002"}, - {file = "scikit_learn-1.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:72abc587c75234935e97d09aa4913a82f7b03ee0b74111dcc2881cba3c5a7b33"}, - {file = "scikit_learn-1.6.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:b3b00cdc8f1317b5f33191df1386c0befd16625f49d979fe77a8d44cae82410d"}, - {file = "scikit_learn-1.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc4765af3386811c3ca21638f63b9cf5ecf66261cc4815c1db3f1e7dc7b79db2"}, - {file = "scikit_learn-1.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25fc636bdaf1cc2f4a124a116312d837148b5e10872147bdaf4887926b8c03d8"}, - {file = "scikit_learn-1.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:fa909b1a36e000a03c382aade0bd2063fd5680ff8b8e501660c0f59f021a6415"}, - {file = "scikit_learn-1.6.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:926f207c804104677af4857b2c609940b743d04c4c35ce0ddc8ff4f053cddc1b"}, - {file = "scikit_learn-1.6.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2c2cae262064e6a9b77eee1c8e768fc46aa0b8338c6a8297b9b6759720ec0ff2"}, - {file = "scikit_learn-1.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1061b7c028a8663fb9a1a1baf9317b64a257fcb036dae5c8752b2abef31d136f"}, - {file = 
"scikit_learn-1.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e69fab4ebfc9c9b580a7a80111b43d214ab06250f8a7ef590a4edf72464dd86"}, - {file = "scikit_learn-1.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:70b1d7e85b1c96383f872a519b3375f92f14731e279a7b4c6cfd650cf5dffc52"}, - {file = "scikit_learn-1.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ffa1e9e25b3d93990e74a4be2c2fc61ee5af85811562f1288d5d055880c4322"}, - {file = "scikit_learn-1.6.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:dc5cf3d68c5a20ad6d571584c0750ec641cc46aeef1c1507be51300e6003a7e1"}, - {file = "scikit_learn-1.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c06beb2e839ecc641366000ca84f3cf6fa9faa1777e29cf0c04be6e4d096a348"}, - {file = "scikit_learn-1.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8ca8cb270fee8f1f76fa9bfd5c3507d60c6438bbee5687f81042e2bb98e5a97"}, - {file = "scikit_learn-1.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:7a1c43c8ec9fde528d664d947dc4c0789be4077a3647f232869f41d9bf50e0fb"}, - {file = "scikit_learn-1.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a17c1dea1d56dcda2fac315712f3651a1fea86565b64b48fa1bc090249cbf236"}, - {file = "scikit_learn-1.6.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6a7aa5f9908f0f28f4edaa6963c0a6183f1911e63a69aa03782f0d924c830a35"}, - {file = "scikit_learn-1.6.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0650e730afb87402baa88afbf31c07b84c98272622aaba002559b614600ca691"}, - {file = "scikit_learn-1.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:3f59fe08dc03ea158605170eb52b22a105f238a5d512c4470ddeca71feae8e5f"}, - {file = "scikit_learn-1.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6849dd3234e87f55dce1db34c89a810b489ead832aaf4d4550b7ea85628be6c1"}, - {file = "scikit_learn-1.6.1-cp39-cp39-macosx_12_0_arm64.whl", hash = 
"sha256:e7be3fa5d2eb9be7d77c3734ff1d599151bb523674be9b834e8da6abe132f44e"}, - {file = "scikit_learn-1.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44a17798172df1d3c1065e8fcf9019183f06c87609b49a124ebdf57ae6cb0107"}, - {file = "scikit_learn-1.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8b7a3b86e411e4bce21186e1c180d792f3d99223dcfa3b4f597ecc92fa1a422"}, - {file = "scikit_learn-1.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:7a73d457070e3318e32bdb3aa79a8d990474f19035464dfd8bede2883ab5dc3b"}, - {file = "scikit_learn-1.6.1.tar.gz", hash = "sha256:b4fc2525eca2c69a59260f583c56a7557c6ccdf8deafdba6e060f94c1c59738e"}, +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "scikit_learn-1.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9fe7f51435f49d97bd41d724bb3e11eeb939882af9c29c931a8002c357e8cdd5"}, + {file = "scikit_learn-1.7.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d0c93294e1e1acbee2d029b1f2a064f26bd928b284938d51d412c22e0c977eb3"}, + {file = "scikit_learn-1.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf3755f25f145186ad8c403312f74fb90df82a4dfa1af19dc96ef35f57237a94"}, + {file = "scikit_learn-1.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2726c8787933add436fb66fb63ad18e8ef342dfb39bbbd19dc1e83e8f828a85a"}, + {file = "scikit_learn-1.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:e2539bb58886a531b6e86a510c0348afaadd25005604ad35966a85c2ec378800"}, + {file = "scikit_learn-1.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ef09b1615e1ad04dc0d0054ad50634514818a8eb3ee3dee99af3bffc0ef5007"}, + {file = "scikit_learn-1.7.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:7d7240c7b19edf6ed93403f43b0fcb0fe95b53bc0b17821f8fb88edab97085ef"}, + {file = "scikit_learn-1.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:80bd3bd4e95381efc47073a720d4cbab485fc483966f1709f1fd559afac57ab8"}, + {file = "scikit_learn-1.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9dbe48d69aa38ecfc5a6cda6c5df5abef0c0ebdb2468e92437e2053f84abb8bc"}, + {file = "scikit_learn-1.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:8fa979313b2ffdfa049ed07252dc94038def3ecd49ea2a814db5401c07f1ecfa"}, + {file = "scikit_learn-1.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c2c7243d34aaede0efca7a5a96d67fddaebb4ad7e14a70991b9abee9dc5c0379"}, + {file = "scikit_learn-1.7.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:9f39f6a811bf3f15177b66c82cbe0d7b1ebad9f190737dcdef77cfca1ea3c19c"}, + {file = "scikit_learn-1.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63017a5f9a74963d24aac7590287149a8d0f1a0799bbe7173c0d8ba1523293c0"}, + {file = "scikit_learn-1.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b2f8a0b1e73e9a08b7cc498bb2aeab36cdc1f571f8ab2b35c6e5d1c7115d97d"}, + {file = "scikit_learn-1.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:34cc8d9d010d29fb2b7cbcd5ccc24ffdd80515f65fe9f1e4894ace36b267ce19"}, + {file = "scikit_learn-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5b7974f1f32bc586c90145df51130e02267e4b7e77cab76165c76cf43faca0d9"}, + {file = "scikit_learn-1.7.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:014e07a23fe02e65f9392898143c542a50b6001dbe89cb867e19688e468d049b"}, + {file = "scikit_learn-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7e7ced20582d3a5516fb6f405fd1d254e1f5ce712bfef2589f51326af6346e8"}, + {file = "scikit_learn-1.7.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1babf2511e6ffd695da7a983b4e4d6de45dce39577b26b721610711081850906"}, + {file = "scikit_learn-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:5abd2acff939d5bd4701283f009b01496832d50ddafa83c90125a4e41c33e314"}, + {file = 
"scikit_learn-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:e39d95a929b112047c25b775035c8c234c5ca67e681ce60d12413afb501129f7"}, + {file = "scikit_learn-1.7.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:0521cb460426c56fee7e07f9365b0f45ec8ca7b2d696534ac98bfb85e7ae4775"}, + {file = "scikit_learn-1.7.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:317ca9f83acbde2883bd6bb27116a741bfcb371369706b4f9973cf30e9a03b0d"}, + {file = "scikit_learn-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:126c09740a6f016e815ab985b21e3a0656835414521c81fc1a8da78b679bdb75"}, + {file = "scikit_learn-1.7.0.tar.gz", hash = "sha256:c01e869b15aec88e2cdb73d27f15bdbe03bce8e2fb43afbe77c45d399e73a5a3"}, ] [package.dependencies] joblib = ">=1.2.0" -numpy = ">=1.19.5" -scipy = ">=1.6.0" +numpy = ">=1.22.0" +scipy = ">=1.8.0" threadpoolctl = ">=3.1.0" [package.extras] -benchmark = ["matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "pandas (>=1.1.5)"] -build = ["cython (>=3.0.10)", "meson-python (>=0.16.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.17.1)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)", "towncrier (>=24.8.0)"] -examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] -install = ["joblib (>=1.2.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)", "threadpoolctl (>=3.1.0)"] -maintenance = ["conda-lock (==2.5.6)"] -tests = ["black (>=24.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.9)", 
"numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.5.1)", "scikit-image (>=0.17.2)"] +benchmark = ["matplotlib (>=3.5.0)", "memory_profiler (>=0.57.0)", "pandas (>=1.4.0)"] +build = ["cython (>=3.0.10)", "meson-python (>=0.16.0)", "numpy (>=1.22.0)", "scipy (>=1.8.0)"] +docs = ["Pillow (>=8.4.0)", "matplotlib (>=3.5.0)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.4.0)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.19.0)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.17.1)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)", "towncrier (>=24.8.0)"] +examples = ["matplotlib (>=3.5.0)", "pandas (>=1.4.0)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.19.0)", "seaborn (>=0.9.0)"] +install = ["joblib (>=1.2.0)", "numpy (>=1.22.0)", "scipy (>=1.8.0)", "threadpoolctl (>=3.1.0)"] +maintenance = ["conda-lock (==3.0.1)"] +tests = ["matplotlib (>=3.5.0)", "mypy (>=1.15)", "numpydoc (>=1.2.0)", "pandas (>=1.4.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.2.1)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.11.7)", "scikit-image (>=0.19.0)"] [[package]] name = "scipy" @@ -2971,6 +3177,8 @@ version = "1.15.3" description = "Fundamental algorithms for scientific computing in Python" optional = false python-versions = ">=3.10" +groups = ["main"] +markers = "python_version < \"3.11\"" files = [ {file = "scipy-1.15.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:a345928c86d535060c9c2b25e71e87c39ab2f22fc96e9636bd74d1dbf9de448c"}, {file = "scipy-1.15.3-cp310-cp310-macosx_12_0_arm64.whl", hash = 
"sha256:ad3432cb0f9ed87477a8d97f03b763fd1d57709f1bbde3c9369b1dff5503b253"}, @@ -3026,7 +3234,63 @@ numpy = ">=1.23.5,<2.5" [package.extras] dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.0.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] -test = ["Cython", "array-api-strict (>=2.0,<2.1.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +test = ["Cython", "array-api-strict (>=2.0,<2.1.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja ; sys_platform != \"emscripten\"", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "scipy" +version = "1.16.0" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.11" +groups = ["main"] +markers = "python_version == \"3.11\"" +files = [ + {file = "scipy-1.16.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:deec06d831b8f6b5fb0b652433be6a09db29e996368ce5911faf673e78d20085"}, + {file = "scipy-1.16.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d30c0fe579bb901c61ab4bb7f3eeb7281f0d4c4a7b52dbf563c89da4fd2949be"}, + {file = "scipy-1.16.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:b2243561b45257f7391d0f49972fca90d46b79b8dbcb9b2cb0f9df928d370ad4"}, + {file = "scipy-1.16.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:e6d7dfc148135e9712d87c5f7e4f2ddc1304d1582cb3a7d698bbadedb61c7afd"}, + {file = "scipy-1.16.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:90452f6a9f3fe5a2cf3748e7be14f9cc7d9b124dce19667b54f5b429d680d539"}, + {file = "scipy-1.16.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a2f0bf2f58031c8701a8b601df41701d2a7be17c7ffac0a4816aeba89c4cdac8"}, + {file = "scipy-1.16.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6c4abb4c11fc0b857474241b812ce69ffa6464b4bd8f4ecb786cf240367a36a7"}, + {file = "scipy-1.16.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b370f8f6ac6ef99815b0d5c9f02e7ade77b33007d74802efc8316c8db98fd11e"}, + {file = "scipy-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:a16ba90847249bedce8aa404a83fb8334b825ec4a8e742ce6012a7a5e639f95c"}, + {file = "scipy-1.16.0-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:7eb6bd33cef4afb9fa5f1fb25df8feeb1e52d94f21a44f1d17805b41b1da3180"}, + {file = "scipy-1.16.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:1dbc8fdba23e4d80394ddfab7a56808e3e6489176d559c6c71935b11a2d59db1"}, + {file = "scipy-1.16.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:7dcf42c380e1e3737b343dec21095c9a9ad3f9cbe06f9c05830b44b1786c9e90"}, + {file = "scipy-1.16.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:26ec28675f4a9d41587266084c626b02899db373717d9312fa96ab17ca1ae94d"}, + {file = "scipy-1.16.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:952358b7e58bd3197cfbd2f2f2ba829f258404bdf5db59514b515a8fe7a36c52"}, + {file = "scipy-1.16.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:03931b4e870c6fef5b5c0970d52c9f6ddd8c8d3e934a98f09308377eba6f3824"}, + {file = "scipy-1.16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:512c4f4f85912767c351a0306824ccca6fd91307a9f4318efe8fdbd9d30562ef"}, + {file = "scipy-1.16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e69f798847e9add03d512eaf5081a9a5c9a98757d12e52e6186ed9681247a1ac"}, + {file = "scipy-1.16.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:adf9b1999323ba335adc5d1dc7add4781cb5a4b0ef1e98b79768c05c796c4e49"}, + {file = "scipy-1.16.0-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:e9f414cbe9ca289a73e0cc92e33a6a791469b6619c240aa32ee18abdce8ab451"}, + {file = "scipy-1.16.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:bbba55fb97ba3cdef9b1ee973f06b09d518c0c7c66a009c729c7d1592be1935e"}, + {file = "scipy-1.16.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:58e0d4354eacb6004e7aa1cd350e5514bd0270acaa8d5b36c0627bb3bb486974"}, + {file = "scipy-1.16.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:75b2094ec975c80efc273567436e16bb794660509c12c6a31eb5c195cbf4b6dc"}, + {file = "scipy-1.16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6b65d232157a380fdd11a560e7e21cde34fdb69d65c09cb87f6cc024ee376351"}, + {file = "scipy-1.16.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1d8747f7736accd39289943f7fe53a8333be7f15a82eea08e4afe47d79568c32"}, + {file = "scipy-1.16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eb9f147a1b8529bb7fec2a85cf4cf42bdfadf9e83535c309a11fdae598c88e8b"}, + {file = "scipy-1.16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d2b83c37edbfa837a8923d19c749c1935ad3d41cf196006a24ed44dba2ec4358"}, + {file = "scipy-1.16.0-cp313-cp313-win_amd64.whl", hash = "sha256:79a3c13d43c95aa80b87328a46031cf52508cf5f4df2767602c984ed1d3c6bbe"}, + {file = "scipy-1.16.0-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:f91b87e1689f0370690e8470916fe1b2308e5b2061317ff76977c8f836452a47"}, + {file = "scipy-1.16.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:88a6ca658fb94640079e7a50b2ad3b67e33ef0f40e70bdb7dc22017dae73ac08"}, + {file = "scipy-1.16.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:ae902626972f1bd7e4e86f58fd72322d7f4ec7b0cfc17b15d4b7006efc385176"}, + {file = "scipy-1.16.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:8cb824c1fc75ef29893bc32b3ddd7b11cf9ab13c1127fe26413a05953b8c32ed"}, + 
{file = "scipy-1.16.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:de2db7250ff6514366a9709c2cba35cb6d08498e961cba20d7cff98a7ee88938"}, + {file = "scipy-1.16.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e85800274edf4db8dd2e4e93034f92d1b05c9421220e7ded9988b16976f849c1"}, + {file = "scipy-1.16.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4f720300a3024c237ace1cb11f9a84c38beb19616ba7c4cdcd771047a10a1706"}, + {file = "scipy-1.16.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:aad603e9339ddb676409b104c48a027e9916ce0d2838830691f39552b38a352e"}, + {file = "scipy-1.16.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f56296fefca67ba605fd74d12f7bd23636267731a72cb3947963e76b8c0a25db"}, + {file = "scipy-1.16.0.tar.gz", hash = "sha256:b5ef54021e832869c8cfb03bc3bf20366cbcd426e02a58e8a58d7584dfbb8f62"}, +] + +[package.dependencies] +numpy = ">=1.25.2,<2.6" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] +doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "linkify-it-py", "matplotlib (>=3.5)", "myst-nb (>=1.2.0)", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.2.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] +test = ["Cython", "array-api-strict (>=2.3.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja ; sys_platform != \"emscripten\"", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] name = "sentence-transformers" @@ -3034,6 +3298,7 @@ version = "3.0.0" description = "Multilingual text embeddings" optional = false python-versions = ">=3.8.0" +groups = ["main"] files = [ {file = "sentence_transformers-3.0.0-py3-none-any.whl", hash = 
"sha256:9bf851b688b796e5fb06c920921efd5e5e05ee616e85cb3026fbdfe4dcf15bf3"}, {file = "sentence_transformers-3.0.0.tar.gz", hash = "sha256:52d4101654ed107a28e9fa5110fce399084b55e7838fd8256471353ddc299033"}, @@ -3059,19 +3324,21 @@ version = "80.9.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.9" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922"}, {file = "setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c"}, ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"] -core = ["importlib_metadata (>=6)", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""] +core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] -test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", 
"jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] -type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] [[package]] name = "six" @@ -3079,6 +3346,7 @@ version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, @@ -3090,6 +3358,7 @@ version = "3.31.0" description = "The Slack API Platform SDK for Python" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "slack_sdk-3.31.0-py2.py3-none-any.whl", hash = "sha256:a120cc461e8ebb7d9175f171dbe0ded37a6878d9f7b96b28e4bad1227399047b"}, {file = "slack_sdk-3.31.0.tar.gz", hash = "sha256:740d2f9c49cbfcbd46fca56b4be9d527934c225312aac18fd2c0fca0ef6bc935"}, @@ -3104,6 +3373,7 @@ version = 
"1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -3115,6 +3385,7 @@ version = "2.7" description = "A modern CSS selector implementation for Beautiful Soup." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4"}, {file = "soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a"}, @@ -3126,6 +3397,7 @@ version = "2.3.6" description = "SSE plugin for Starlette" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "sse_starlette-2.3.6-py3-none-any.whl", hash = "sha256:d49a8285b182f6e2228e2609c350398b2ca2c36216c2675d875f81e93548f760"}, {file = "sse_starlette-2.3.6.tar.gz", hash = "sha256:0382336f7d4ec30160cf9ca0518962905e1b69b72d6c1c995131e0a703b436e3"}, @@ -3142,17 +3414,19 @@ uvicorn = ["uvicorn (>=0.34.0)"] [[package]] name = "starlette" -version = "0.47.0" +version = "0.47.1" description = "The little ASGI library that shines." 
optional = false python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "starlette-0.47.0-py3-none-any.whl", hash = "sha256:9d052d4933683af40ffd47c7465433570b4949dc937e20ad1d73b34e72f10c37"}, - {file = "starlette-0.47.0.tar.gz", hash = "sha256:1f64887e94a447fed5f23309fb6890ef23349b7e478faa7b24a851cd4eb844af"}, + {file = "starlette-0.47.1-py3-none-any.whl", hash = "sha256:5e11c9f5c7c3f24959edbf2dffdc01bba860228acf657129467d8a7468591527"}, + {file = "starlette-0.47.1.tar.gz", hash = "sha256:aef012dd2b6be325ffa16698f9dc533614fb1cebd593a906b90dc1025529a79b"}, ] [package.dependencies] anyio = ">=3.6.2,<5" +typing-extensions = {version = ">=4.10.0", markers = "python_version < \"3.13\""} [package.extras] full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] @@ -3163,6 +3437,7 @@ version = "1.14.0" description = "Computer algebra system (CAS) in Python" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5"}, {file = "sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517"}, @@ -3180,6 +3455,7 @@ version = "0.9.0" description = "Pretty-print tabular data" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, @@ -3194,6 +3470,7 @@ version = "1.7.0" description = "module to create simple ASCII tables" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "texttable-1.7.0-py2.py3-none-any.whl", hash = "sha256:72227d592c82b3d7f672731ae73e4d1f88cd8e2ef5b075a7a7f01a23a3743917"}, {file = "texttable-1.7.0.tar.gz", hash = 
"sha256:2d2068fb55115807d3ac77a4ca68fa48803e84ebb0ee2340f858107a36522638"}, @@ -3205,6 +3482,7 @@ version = "3.6.0" description = "threadpoolctl" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb"}, {file = "threadpoolctl-3.6.0.tar.gz", hash = "sha256:8ab8b4aa3491d812b623328249fab5302a68d2d71745c8a4c719a2fcaba9f44e"}, @@ -3216,6 +3494,7 @@ version = "0.7.0" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "tiktoken-0.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485f3cc6aba7c6b6ce388ba634fbba656d9ee27f766216f45146beb4ac18b25f"}, {file = "tiktoken-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e54be9a2cd2f6d6ffa3517b064983fb695c9a9d8aa7d574d1ef3c3f931a99225"}, @@ -3264,26 +3543,27 @@ blobfile = ["blobfile (>=2)"] [[package]] name = "tokenizers" -version = "0.21.1" +version = "0.21.2" description = "" optional = false python-versions = ">=3.9" -files = [ - {file = "tokenizers-0.21.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e78e413e9e668ad790a29456e677d9d3aa50a9ad311a40905d6861ba7692cf41"}, - {file = "tokenizers-0.21.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:cd51cd0a91ecc801633829fcd1fda9cf8682ed3477c6243b9a095539de4aecf3"}, - {file = "tokenizers-0.21.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28da6b72d4fb14ee200a1bd386ff74ade8992d7f725f2bde2c495a9a98cf4d9f"}, - {file = "tokenizers-0.21.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:34d8cfde551c9916cb92014e040806122295a6800914bab5865deb85623931cf"}, - {file = "tokenizers-0.21.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aaa852d23e125b73d283c98f007e06d4595732104b65402f46e8ef24b588d9f8"}, - {file = 
"tokenizers-0.21.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a21a15d5c8e603331b8a59548bbe113564136dc0f5ad8306dd5033459a226da0"}, - {file = "tokenizers-0.21.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2fdbd4c067c60a0ac7eca14b6bd18a5bebace54eb757c706b47ea93204f7a37c"}, - {file = "tokenizers-0.21.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dd9a0061e403546f7377df940e866c3e678d7d4e9643d0461ea442b4f89e61a"}, - {file = "tokenizers-0.21.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:db9484aeb2e200c43b915a1a0150ea885e35f357a5a8fabf7373af333dcc8dbf"}, - {file = "tokenizers-0.21.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:ed248ab5279e601a30a4d67bdb897ecbe955a50f1e7bb62bd99f07dd11c2f5b6"}, - {file = "tokenizers-0.21.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:9ac78b12e541d4ce67b4dfd970e44c060a2147b9b2a21f509566d556a509c67d"}, - {file = "tokenizers-0.21.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:e5a69c1a4496b81a5ee5d2c1f3f7fbdf95e90a0196101b0ee89ed9956b8a168f"}, - {file = "tokenizers-0.21.1-cp39-abi3-win32.whl", hash = "sha256:1039a3a5734944e09de1d48761ade94e00d0fa760c0e0551151d4dd851ba63e3"}, - {file = "tokenizers-0.21.1-cp39-abi3-win_amd64.whl", hash = "sha256:0f0dcbcc9f6e13e675a66d7a5f2f225a736745ce484c1a4e07476a89ccdad382"}, - {file = "tokenizers-0.21.1.tar.gz", hash = "sha256:a1bb04dc5b448985f86ecd4b05407f5a8d97cb2c0532199b2a302a604a0165ab"}, +groups = ["main"] +files = [ + {file = "tokenizers-0.21.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:342b5dfb75009f2255ab8dec0041287260fed5ce00c323eb6bab639066fef8ec"}, + {file = "tokenizers-0.21.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:126df3205d6f3a93fea80c7a8a266a78c1bd8dd2fe043386bafdd7736a23e45f"}, + {file = "tokenizers-0.21.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a32cd81be21168bd0d6a0f0962d60177c447a1aa1b1e48fa6ec9fc728ee0b12"}, + {file = 
"tokenizers-0.21.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8bd8999538c405133c2ab999b83b17c08b7fc1b48c1ada2469964605a709ef91"}, + {file = "tokenizers-0.21.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5e9944e61239b083a41cf8fc42802f855e1dca0f499196df37a8ce219abac6eb"}, + {file = "tokenizers-0.21.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:514cd43045c5d546f01142ff9c79a96ea69e4b5cda09e3027708cb2e6d5762ab"}, + {file = "tokenizers-0.21.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b1b9405822527ec1e0f7d8d2fdb287a5730c3a6518189c968254a8441b21faae"}, + {file = "tokenizers-0.21.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fed9a4d51c395103ad24f8e7eb976811c57fbec2af9f133df471afcd922e5020"}, + {file = "tokenizers-0.21.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2c41862df3d873665ec78b6be36fcc30a26e3d4902e9dd8608ed61d49a48bc19"}, + {file = "tokenizers-0.21.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:ed21dc7e624e4220e21758b2e62893be7101453525e3d23264081c9ef9a6d00d"}, + {file = "tokenizers-0.21.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:0e73770507e65a0e0e2a1affd6b03c36e3bc4377bd10c9ccf51a82c77c0fe365"}, + {file = "tokenizers-0.21.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:106746e8aa9014a12109e58d540ad5465b4c183768ea96c03cbc24c44d329958"}, + {file = "tokenizers-0.21.2-cp39-abi3-win32.whl", hash = "sha256:cabda5a6d15d620b6dfe711e1af52205266d05b379ea85a8a301b3593c60e962"}, + {file = "tokenizers-0.21.2-cp39-abi3-win_amd64.whl", hash = "sha256:58747bb898acdb1007f37a7bbe614346e98dc28708ffb66a3fd50ce169ac6c98"}, + {file = "tokenizers-0.21.2.tar.gz", hash = "sha256:fdc7cffde3e2113ba0e6cc7318c40e3438a4d74bbc62bf04bcc63bdfb082ac77"}, ] [package.dependencies] @@ -3300,6 +3580,8 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" +groups = ["main"] +markers 
= "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -3337,35 +3619,36 @@ files = [ [[package]] name = "torch" -version = "2.7.0" +version = "2.7.1" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" optional = false python-versions = ">=3.9.0" -files = [ - {file = "torch-2.7.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c9afea41b11e1a1ab1b258a5c31afbd646d6319042bfe4f231b408034b51128b"}, - {file = "torch-2.7.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0b9960183b6e5b71239a3e6c883d8852c304e691c0b2955f7045e8a6d05b9183"}, - {file = "torch-2.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:2ad79d0d8c2a20a37c5df6052ec67c2078a2c4e9a96dd3a8b55daaff6d28ea29"}, - {file = "torch-2.7.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:34e0168ed6de99121612d72224e59b2a58a83dae64999990eada7260c5dd582d"}, - {file = "torch-2.7.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2b7813e904757b125faf1a9a3154e1d50381d539ced34da1992f52440567c156"}, - {file = "torch-2.7.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:fd5cfbb4c3bbadd57ad1b27d56a28008f8d8753733411a140fcfb84d7f933a25"}, - {file = "torch-2.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:58df8d5c2eeb81305760282b5069ea4442791a6bbf0c74d9069b7b3304ff8a37"}, - {file = "torch-2.7.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:0a8d43caa342b9986101ec5feb5bbf1d86570b5caa01e9cb426378311258fdde"}, - {file = "torch-2.7.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:36a6368c7ace41ad1c0f69f18056020b6a5ca47bedaca9a2f3b578f5a104c26c"}, - {file = "torch-2.7.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:15aab3e31c16feb12ae0a88dba3434a458874636f360c567caa6a91f6bfba481"}, - 
{file = "torch-2.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:f56d4b2510934e072bab3ab8987e00e60e1262fb238176168f5e0c43a1320c6d"}, - {file = "torch-2.7.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:30b7688a87239a7de83f269333651d8e582afffce6f591fff08c046f7787296e"}, - {file = "torch-2.7.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:868ccdc11798535b5727509480cd1d86d74220cfdc42842c4617338c1109a205"}, - {file = "torch-2.7.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:9b52347118116cf3dff2ab5a3c3dd97c719eb924ac658ca2a7335652076df708"}, - {file = "torch-2.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:434cf3b378340efc87c758f250e884f34460624c0523fe5c9b518d205c91dd1b"}, - {file = "torch-2.7.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:edad98dddd82220465b106506bb91ee5ce32bd075cddbcf2b443dfaa2cbd83bf"}, - {file = "torch-2.7.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:2a885fc25afefb6e6eb18a7d1e8bfa01cc153e92271d980a49243b250d5ab6d9"}, - {file = "torch-2.7.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:176300ff5bc11a5f5b0784e40bde9e10a35c4ae9609beed96b4aeb46a27f5fae"}, - {file = "torch-2.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:d0ca446a93f474985d81dc866fcc8dccefb9460a29a456f79d99c29a78a66993"}, - {file = "torch-2.7.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:27f5007bdf45f7bb7af7f11d1828d5c2487e030690afb3d89a651fd7036a390e"}, - {file = "torch-2.7.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:e362efaa5b3078e5f75c33efc05005b9b46de0d2e899519d5b4cad0e050ed0f7"}, - {file = "torch-2.7.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:fc1ed9258cbfce69970ff508ea60881818d414d098a800b7695ba36f570d34b0"}, - {file = "torch-2.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:87b0802cab44659fcb6bcf5678d58fa4a8b48561cde8fb2d317edf0b6990e1bb"}, - {file = "torch-2.7.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:ccd7509141713997861b7a947ef0a717143cd7e9240addd168f38ba8fd23fd56"}, +groups = ["main"] 
+files = [ + {file = "torch-2.7.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:a103b5d782af5bd119b81dbcc7ffc6fa09904c423ff8db397a1e6ea8fd71508f"}, + {file = "torch-2.7.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:fe955951bdf32d182ee8ead6c3186ad54781492bf03d547d31771a01b3d6fb7d"}, + {file = "torch-2.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:885453d6fba67d9991132143bf7fa06b79b24352f4506fd4d10b309f53454162"}, + {file = "torch-2.7.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:d72acfdb86cee2a32c0ce0101606f3758f0d8bb5f8f31e7920dc2809e963aa7c"}, + {file = "torch-2.7.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:236f501f2e383f1cb861337bdf057712182f910f10aeaf509065d54d339e49b2"}, + {file = "torch-2.7.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:06eea61f859436622e78dd0cdd51dbc8f8c6d76917a9cf0555a333f9eac31ec1"}, + {file = "torch-2.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:8273145a2e0a3c6f9fd2ac36762d6ee89c26d430e612b95a99885df083b04e52"}, + {file = "torch-2.7.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:aea4fc1bf433d12843eb2c6b2204861f43d8364597697074c8d38ae2507f8730"}, + {file = "torch-2.7.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:27ea1e518df4c9de73af7e8a720770f3628e7f667280bce2be7a16292697e3fa"}, + {file = "torch-2.7.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c33360cfc2edd976c2633b3b66c769bdcbbf0e0b6550606d188431c81e7dd1fc"}, + {file = "torch-2.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:d8bf6e1856ddd1807e79dc57e54d3335f2b62e6f316ed13ed3ecfe1fc1df3d8b"}, + {file = "torch-2.7.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:787687087412c4bd68d315e39bc1223f08aae1d16a9e9771d95eabbb04ae98fb"}, + {file = "torch-2.7.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:03563603d931e70722dce0e11999d53aa80a375a3d78e6b39b9f6805ea0a8d28"}, + {file = "torch-2.7.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = 
"sha256:d632f5417b6980f61404a125b999ca6ebd0b8b4bbdbb5fbbba44374ab619a412"}, + {file = "torch-2.7.1-cp313-cp313-win_amd64.whl", hash = "sha256:23660443e13995ee93e3d844786701ea4ca69f337027b05182f5ba053ce43b38"}, + {file = "torch-2.7.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:0da4f4dba9f65d0d203794e619fe7ca3247a55ffdcbd17ae8fb83c8b2dc9b585"}, + {file = "torch-2.7.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:e08d7e6f21a617fe38eeb46dd2213ded43f27c072e9165dc27300c9ef9570934"}, + {file = "torch-2.7.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:30207f672328a42df4f2174b8f426f354b2baa0b7cca3a0adb3d6ab5daf00dc8"}, + {file = "torch-2.7.1-cp313-cp313t-win_amd64.whl", hash = "sha256:79042feca1c634aaf6603fe6feea8c6b30dfa140a6bbc0b973e2260c7e79a22e"}, + {file = "torch-2.7.1-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:988b0cbc4333618a1056d2ebad9eb10089637b659eb645434d0809d8d937b946"}, + {file = "torch-2.7.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:e0d81e9a12764b6f3879a866607c8ae93113cbcad57ce01ebde63eb48a576369"}, + {file = "torch-2.7.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:8394833c44484547ed4a47162318337b88c97acdb3273d85ea06e03ffff44998"}, + {file = "torch-2.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:df41989d9300e6e3c19ec9f56f856187a6ef060c3662fe54f4b6baf1fc90bd19"}, + {file = "torch-2.7.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:a737b5edd1c44a5c1ece2e9f3d00df9d1b3fb9541138bee56d83d38293fb6c9d"}, ] [package.dependencies] @@ -3388,7 +3671,7 @@ nvidia-nccl-cu12 = {version = "2.26.2", markers = "platform_system == \"Linux\" nvidia-nvjitlink-cu12 = {version = "12.6.85", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-nvtx-cu12 = {version = "12.6.77", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} sympy = ">=1.13.3" -triton = {version = "3.3.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +triton = {version = 
"3.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} typing-extensions = ">=4.10.0" [package.extras] @@ -3401,6 +3684,7 @@ version = "4.67.1" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, @@ -3422,6 +3706,7 @@ version = "4.52.4" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = false python-versions = ">=3.9.0" +groups = ["main"] files = [ {file = "transformers-4.52.4-py3-none-any.whl", hash = "sha256:203f5c19416d5877e36e88633943761719538a25d9775977a24fe77a1e5adfc7"}, {file = "transformers-4.52.4.tar.gz", hash = "sha256:aff3764441c1adc192a08dba49740d3cbbcb72d850586075aed6bd89b98203e6"}, @@ -3489,17 +3774,19 @@ vision = ["Pillow (>=10.0.1,<=15.0)"] [[package]] name = "triton" -version = "3.3.0" +version = "3.3.1" description = "A language and compiler for custom Deep Learning operations" optional = false python-versions = "*" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ - {file = "triton-3.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fad99beafc860501d7fcc1fb7045d9496cbe2c882b1674640304949165a916e7"}, - {file = "triton-3.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3161a2bf073d6b22c4e2f33f951f3e5e3001462b2570e6df9cd57565bdec2984"}, - {file = "triton-3.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b68c778f6c4218403a6bd01be7484f6dc9e20fe2083d22dd8aef33e3b87a10a3"}, - {file = "triton-3.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:47bc87ad66fa4ef17968299acacecaab71ce40a238890acc6ad197c3abe2b8f1"}, - {file = "triton-3.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ce4700fc14032af1e049005ae94ba908e71cd6c2df682239aed08e49bc71b742"}, - {file = "triton-3.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f41403bfa0cbb3e24fd958ca7fee04e9681e55e539296db9aca30c42acae693"}, + {file = "triton-3.3.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b74db445b1c562844d3cfad6e9679c72e93fdfb1a90a24052b03bb5c49d1242e"}, + {file = "triton-3.3.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b31e3aa26f8cb3cc5bf4e187bf737cbacf17311e1112b781d4a059353dfd731b"}, + {file = "triton-3.3.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9999e83aba21e1a78c1f36f21bce621b77bcaa530277a50484a7cb4a822f6e43"}, + {file = "triton-3.3.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b89d846b5a4198317fec27a5d3a609ea96b6d557ff44b56c23176546023c4240"}, + {file = "triton-3.3.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3198adb9d78b77818a5388bff89fa72ff36f9da0bc689db2f0a651a67ce6a42"}, + {file = "triton-3.3.1-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f6139aeb04a146b0b8e0fbbd89ad1e65861c57cfed881f21d62d3cb94a36bab7"}, ] [package.dependencies] @@ -3516,6 +3803,7 @@ version = "4.14.0" description = "Backported and Experimental Type Hints for Python 3.9+" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, @@ -3527,6 +3815,7 @@ version = "0.9.0" description = "Runtime inspection utilities for typing 
module." optional = false python-versions = "*" +groups = ["main"] files = [ {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, @@ -3542,6 +3831,7 @@ version = "0.4.1" description = "Runtime typing introspection tools" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, @@ -3556,6 +3846,7 @@ version = "2025.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" +groups = ["main"] files = [ {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, @@ -3567,6 +3858,7 @@ version = "0.13.7" description = "A library that prepares raw documents for downstream ML tasks." 
optional = false python-versions = "<3.12,>=3.9.0" +groups = ["main"] files = [ {file = "unstructured-0.13.7-py3-none-any.whl", hash = "sha256:a3d8f3037cb3063661531c6ecc04aca6df93c293ba06e36d67ffc70857a6f208"}, {file = "unstructured-0.13.7.tar.gz", hash = "sha256:5d59161d353b7006d8c6ee6f1a39154a5a11a5aaa258aac3fe90a8d44016aa6c"}, @@ -3657,13 +3949,14 @@ xlsx = ["networkx", "openpyxl", "pandas", "xlrd"] [[package]] name = "unstructured-client" -version = "0.36.0" +version = "0.37.2" description = "Python Client SDK for Unstructured API" optional = false python-versions = ">=3.9.2" +groups = ["main"] files = [ - {file = "unstructured_client-0.36.0-py3-none-any.whl", hash = "sha256:d0ecf3ac4d481437d858147904ff6e41205032cf8353af5cdd3ebaa190481d6a"}, - {file = "unstructured_client-0.36.0.tar.gz", hash = "sha256:ab293498100275c0e1d74c926c82dae2b3ba3fbb88945c0ba03b4b7a29197e4a"}, + {file = "unstructured_client-0.37.2-py3-none-any.whl", hash = "sha256:2bf5d55fc36ba06881b720eb8540400e60f70aaec011401075979b472cf45c60"}, + {file = "unstructured_client-0.37.2.tar.gz", hash = "sha256:936b67e7f1108248160d00ac0e673f06e2f6269d2955368ac523c0cd7c65af5e"}, ] [package.dependencies] @@ -3677,17 +3970,18 @@ requests-toolbelt = ">=1.0.0" [[package]] name = "urllib3" -version = "2.4.0" +version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, - {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -3698,6 +3992,8 @@ version = "0.34.3" description = "The lightning-fast ASGI server." optional = false python-versions = ">=3.9" +groups = ["main"] +markers = "sys_platform != \"emscripten\"" files = [ {file = "uvicorn-0.34.3-py3-none-any.whl", hash = "sha256:16246631db62bdfbf069b0645177d6e8a77ba950cfedbfd093acef9444e4d885"}, {file = "uvicorn-0.34.3.tar.gz", hash = "sha256:35919a9a979d7a59334b6b10e05d77c1d0d574c50e0fc98b8b1a0f165708b55a"}, @@ -3709,7 +4005,7 @@ h11 = ">=0.8" typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} [package.extras] -standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] +standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] [[package]] name = "virtualenv" @@ -3717,6 +4013,7 @@ version = 
"20.31.2" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "virtualenv-20.31.2-py3-none-any.whl", hash = "sha256:36efd0d9650ee985f0cad72065001e66d49a6f24eb44d98980f630686243cf11"}, {file = "virtualenv-20.31.2.tar.gz", hash = "sha256:e10c0a9d02835e592521be48b332b6caee6887f332c111aa79a09b9e79efc2af"}, @@ -3729,7 +4026,7 @@ platformdirs = ">=3.9.1,<5" [package.extras] docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] -test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"GraalVM\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] [[package]] name = "wrapt" @@ -3737,6 +4034,7 @@ version = "1.17.2" description = "Module for decorators, wrappers and monkey patching." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, @@ -3820,6 +4118,6 @@ files = [ ] [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = ">=3.10.0,<3.12" -content-hash = "fde38412e067c3bec2be76f8b2ebcf338035ecea116c3a1b1a4a2ebfad5d7354" +content-hash = "32ef47989ca31524cd42b0cdaeae36d57f4e4c4967333ad44fd2e6af1c3d14b3" diff --git a/pyproject.toml b/pyproject.toml index a8fd6a8b..ba99dcde 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "camel-oasis" -version = "0.2.2" +version = "0.2.3" description = "Open Agents Social Interaction Simulations on a Large Scale" authors = ["CAMEL-AI.org"] readme = "README.md" @@ -36,7 +36,7 @@ prance = "23.6.21.0" openapi-spec-validator = "0.7.1" slack_sdk = "3.31.0" neo4j = "5.23.0" -camel-ai = "0.2.62" +camel-ai = "0.2.70" requests_oauthlib = "2.0.0" [build-system] diff --git a/test/agent/test_action_docstring.py b/test/agent/test_action_docstring.py index 06d12d25..f6cc2a08 100644 --- a/test/agent/test_action_docstring.py +++ b/test/agent/test_action_docstring.py @@ -43,6 +43,7 @@ def test_transfer_to_openai_function(): SocialAction.undo_dislike_comment, SocialAction.do_nothing, SocialAction.purchase_product, + SocialAction.report_post, SocialAction.join_group, SocialAction.leave_group, SocialAction.listen_from_group, diff --git a/test/agent/test_agent_tools.py b/test/agent/test_agent_tools.py index 25b95681..3ea38bc1 100644 --- a/test/agent/test_agent_tools.py +++ b/test/agent/test_agent_tools.py @@ -67,7 +67,7 @@ async def test_agents_posting(setup_platform): channel=channel, tools=MathToolkit().get_tools(), available_actions=[ActionType.CREATE_POST], - 
single_iteration=False) + max_iteration=1) await agent.env.action.sign_up(f"user{i}", f"User{i}", "A bio.") agents.append(agent) diff --git a/test/agent/test_twitter_user_agent_all_actions.py b/test/agent/test_twitter_user_agent_all_actions.py index a92f4eb7..a4f28f41 100644 --- a/test/agent/test_twitter_user_agent_all_actions.py +++ b/test/agent/test_twitter_user_agent_all_actions.py @@ -143,6 +143,19 @@ async def test_agents_actions(setup_twitter): assert return_message["success"] is True await asyncio.sleep(random.uniform(0, 0.1)) + # report once + return_message = await action_agent.env.action.report_post( + 1, "Inappropriate content") + assert return_message["success"] is True + await asyncio.sleep(random.uniform(0, 0.1)) + + # report twice + other_agent = agents[1] + return_message = await other_agent.env.action.report_post( + 1, "Spam content") + assert return_message["success"] is True + await asyncio.sleep(random.uniform(0, 0.1)) + return_message = await action_agent.env.action.like_comment(1) assert return_message["success"] is True await asyncio.sleep(random.uniform(0, 0.1)) diff --git a/test/infra/database/test_create_fetch_database.py b/test/infra/database/test_create_fetch_database.py index 7de89515..efb98dde 100644 --- a/test/infra/database/test_create_fetch_database.py +++ b/test/infra/database/test_create_fetch_database.py @@ -126,8 +126,9 @@ def test_post_operations(): # Insert a post: cursor.execute( ("INSERT INTO post (user_id, content, created_at, num_likes, " - "num_dislikes, num_shares) VALUES (?, ?, ?, ?, ?, ?)"), - (1, "This is a test post", "2024-04-21 22:02:42", 0, 1, 2), + "num_dislikes, num_shares, num_reports) VALUES (?, ?, ?, ?, ?, ?, ?)" + ), + (1, "This is a test post", "2024-04-21 22:02:42", 0, 1, 2, 0), ) conn.commit() @@ -141,6 +142,7 @@ def test_post_operations(): assert post[6] == 0 assert post[7] == 1 assert post[8] == 2 + assert post[9] == 0 # Update the post cursor.execute( @@ -158,7 +160,8 @@ def test_post_operations(): 
'created_at': '2024-04-21 22:02:42', 'num_likes': 0, 'num_dislikes': 1, - 'num_shares': 2 + 'num_shares': 2, + 'num_reports': 0 }] actual_result = fetch_table_from_db(cursor, "post") diff --git a/test/infra/database/test_report_post.py b/test/infra/database/test_report_post.py new file mode 100644 index 00000000..c15f0a1e --- /dev/null +++ b/test/infra/database/test_report_post.py @@ -0,0 +1,169 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the “License”); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an “AS IS” BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== +import os +import os.path as osp +import sqlite3 + +import pytest + +from oasis.social_platform.platform import Platform + +parent_folder = osp.dirname(osp.abspath(__file__)) +test_db_filepath = osp.join(parent_folder, "test.db") + + +class MockChannel: + + def __init__(self): + self.call_count = 0 + self.messages = [] + + async def receive_from(self): + # Simulate a series of operations + if self.call_count == 0: + self.call_count += 1 + return ("id_", (1, "This is a common post", "create_post")) + elif self.call_count == 1: + self.call_count += 1 + return ("id_", (2, 1, "repost")) + elif self.call_count == 2: + self.call_count += 1 + return ("id_", (3, (1, "This is a quote comment"), "quote_post")) + elif self.call_count == 3: + self.call_count += 1 + return ("id_", (2, (1, "Inappropriate content"), "report_post")) + elif self.call_count == 4: + self.call_count += 1 + return ("id_", (3, (2, "Spam content"), "report_post")) + elif self.call_count == 5: + self.call_count += 1 + return ("id_", (1, (3, "Misinformation"), "report_post")) + else: + return ("id_", (None, None, "exit")) + + async def send_to(self, message): + self.messages.append(message) + if self.call_count == 1: + # Verify common post creation success + assert message[2]["success"] is True + assert "post_id" in message[2] + elif self.call_count == 2: + # Verify repost success + assert message[2]["success"] is True + assert "post_id" in message[2] + elif self.call_count == 3: + # Verify quote post success + assert message[2]["success"] is True + assert "post_id" in message[2] + elif self.call_count == 4: + # Verify report on common post success + assert message[2]["success"] is True + assert "report_id" in message[2] + elif self.call_count == 5: + # Verify report on repost success + assert message[2]["success"] is True + assert "report_id" in message[2] + elif self.call_count == 6: + # Verify report on quote post success + assert message[2]["success"] is True + assert "report_id" in 
message[2] + + +@pytest.fixture +def setup_platform(): + # Ensure test database doesn't exist + if os.path.exists(test_db_filepath): + os.remove(test_db_filepath) + + # Create database and tables + db_path = test_db_filepath + mock_channel = MockChannel() + instance = Platform(db_path=db_path, channel=mock_channel) + return instance + + +@pytest.mark.asyncio +async def test_report_post(setup_platform): + try: + platform = setup_platform + + # Insert test users + conn = sqlite3.connect(test_db_filepath) + cursor = conn.cursor() + cursor.execute( + ("INSERT INTO user " + "(user_id, agent_id, user_name, num_followings, num_followers) " + "VALUES (?, ?, ?, ?, ?)"), + (1, 1, "user1", 0, 0), + ) + cursor.execute( + ("INSERT INTO user " + "(user_id, agent_id, user_name, num_followings, num_followers) " + "VALUES (?, ?, ?, ?, ?)"), + (2, 2, "user2", 0, 0), + ) + cursor.execute( + ("INSERT INTO user " + "(user_id, agent_id, user_name, num_followings, num_followers) " + "VALUES (?, ?, ?, ?, ?)"), + (3, 3, "user3", 0, 0), + ) + conn.commit() + + await platform.running() + + # Verify report records in database + cursor.execute("SELECT * FROM report ORDER BY created_at") + reports = cursor.fetchall() + + # Verify number of report records + assert len(reports) == 3, "Should have 3 report records" + + # Verify report on common post + assert reports[0][1] == 2, "Reporter user ID should be 2" + assert reports[0][2] == 1, "Reported post ID should be 1" + assert reports[0][3] == "Inappropriate content", ( + "Report reason doesn't match") + assert reports[0][4] is not None, "Creation time should not be empty" + + # Verify report on repost + assert reports[1][1] == 3, "Reporter user ID should be 3" + assert reports[1][2] == 2, "Reported post ID should be 2" + assert reports[1][3] == "Spam content", ("Report reason doesn't match") + assert reports[1][4] is not None, "Creation time should not be empty" + + # Verify report on quote post + assert reports[2][1] == 1, "Reporter user ID 
should be 1" + assert reports[2][2] == 3, "Reported post ID should be 3" + assert reports[2][3] == "Misinformation", ( + "Report reason doesn't match") + assert reports[2][4] is not None, "Creation time should not be empty" + + # Verify post report counts + cursor.execute( + "SELECT post_id, num_reports FROM post ORDER BY post_id") + post_reports = cursor.fetchall() + + # Should have report counts for all three posts + assert len(post_reports) == 3, "Should have 3 posts with report counts" + + # Verify report counts + for post_id, num_reports in post_reports: + assert num_reports == 1, f"Post {post_id} should have 1 report" + + finally: + # Cleanup + conn.close() + if os.path.exists(test_db_filepath): + os.remove(test_db_filepath)