
Commit 26fc0e9

Milvus-doc-bot authored and committed
Translate blogs
1 parent ea47148 commit 26fc0e9

File tree

25 files changed: +4113 -1 lines changed

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
{"codeList":["from langchain.agents import create_agent\nagent = create_agent(\n model=\"openai:gpt-4o\",\n tools=[get_weather, query_database],\n system_prompt=\"You are a customer service assistant who helps users check the weather and order information.\"\n)\nresult = agent.invoke({\n \"messages\": [{\"role\": \"user\", \"content\": \"What’s the weather like in Shanghai today?\"}]\n})\n","from langchain.agents import create_agent\nfrom langchain.agents.middleware import PIIMiddleware\n\n\nagent = create_agent(\n model=\"gpt-4o\",\n tools=[], # Add tools as needed\n middleware=[\n # Redact emails in user input\n PIIMiddleware(\"email\", strategy=\"redact\", apply_to_input=True),\n # Mask credit cards (show last 4 digits)\n PIIMiddleware(\"credit_card\", strategy=\"mask\", apply_to_input=True),\n # Custom PII type with regex\n PIIMiddleware(\n \"api_key\",\n detector=r\"sk-[a-zA-Z0-9]{32}\",\n strategy=\"block\", # Raise error if detected\n ),\n ],\n)\n","from langchain.agents import create_agent\nfrom langchain.agents.middleware import SummarizationMiddleware\n\n\nagent = create_agent(\n model=\"gpt-4o\",\n tools=[weather_tool, calculator_tool],\n middleware=[\n SummarizationMiddleware(\n model=\"gpt-4o-mini\", #Summarize using a cheaper model \n max_tokens_before_summary=4000, # Trigger summarization at 4000 tokens\n messages_to_keep=20, # Keep last 20 messages after summary\n ),\n ],\n)\n","from langchain.agents import create_agent\nfrom langchain.agents.middleware import ToolRetryMiddleware\nagent = create_agent(\n model=\"gpt-4o\",\n tools=[search_tool, database_tool],\n middleware=[\n ToolRetryMiddleware(\n max_retries=3, # Retry up to 3 times\n backoff_factor=2.0, # Exponential backoff multiplier\n initial_delay=1.0, # Start with 1 second delay\n max_delay=60.0, # Cap delays at 60 seconds\n jitter=True, # Add random jitter to avoid thundering herd (±25%)\n\n ),\n ],\n)\n","from langchain.agents.middleware import before_model\nfrom langchain.agents.middleware import AgentState\nfrom langgraph.runtime import Runtime\n@before_model\ndef log_before_model(state: AgentState, runtime: Runtime) -> dict | None:\n print(f\"About to call model with {len(state['messages'])} messages\")\n return None # Returning None means the normal flow continues\nagent = create_agent(\n model=\"openai:gpt-4o\",\n tools=[...],\n middleware=[log_before_model],\n)\n","from langchain.agents import create_agent\nfrom pydantic import BaseModel, Field\nclass WeatherReport(BaseModel):\n location: str = Field(description=\"City name\")\n temperature: float = Field(description=\"Temperature (°C)\")\n condition: str = Field(description=\"Weather condition\")\nagent = create_agent(\n model=\"openai:gpt-4o\",\n tools=[get_weather],\n response_format=WeatherReport # Use the Pydantic model as the response schema\n)\nresult = agent.invoke({\"role\": \"user\", \"content\": \"What’s the weather like in Shanghai today??\"})\nweather_data = result['structured_response'] # Retrieve the structured response\nprint(f\"{weather_data.location}: {weather_data.temperature}°C, {weather_data.condition}\")\n","from langchain.agents import create_agent\nfrom langchain_milvus import Milvus\nfrom langchain_openai import OpenAIEmbeddings\n# Initialize the vector database as a knowledge base\nvectorstore = Milvus(\n embedding=OpenAIEmbeddings(), \n collection_name=\"company_knowledge\",\n connection_args={\"uri\": \"http://localhost:19530\"} #\n)\n# Convert the retriever into a Tool for the Agent\nagent = create_agent(\n 
model=\"openai:gpt-4o\",\n tools=[vectorstore.as_retriever().as_tool(\n name=\"knowledge_search\",\n description=\"Search the company knowledge base to answer professional questions\"\n )],\n system_prompt=\"You can retrieve information from the knowledge base to answer questions.\"\n)\n","from langchain_milvus import Milvus\nfrom langchain.agents import create_agent\nfrom langchain.agents.middleware import SummarizationMiddleware\nfrom langgraph.checkpoint.memory import InMemorySaver\n# Long-term memory storage(Milvus)\nlong_term_memory = Milvus.from_documents(\n documents=[], # Initially empty; dynamically updated at runtime\n embedding=OpenAIEmbeddings(),\n connection_args={\"uri\": \"./agent_memory.db\"}\n)\n# Short-term memory management(LangGraph Checkpointer + Summarization)\nagent = create_agent(\n model=\"openai:gpt-4o\",\n tools=[long_term_memory.as_retriever().as_tool(\n name=\"recall_memory\",\n description=\"Retrieve the agent’s historical memories and past experiences\"\n )],\n checkpointer=InMemorySaver(), # Short-term memory\n middleware=[\n SummarizationMiddleware(\n model=\"openai:gpt-4o-mini\",\n max_tokens_before_summary=4000 # When the threshold is exceeded, summarize and store it in Milvus\n )\n ]\n)\n","# Filter retrievals by source (e.g., search only medical reports)\nvectorstore.similarity_search(\n query=\"What is the patient's blood pressure reading?\",\n k=3,\n expr=\"source == 'medical_reports' AND modality == 'text'\" # Milvus scalar filtering\n)\n"],"headingContent":"","anchorList":[{"label":"لماذا يقصر التصميم القائم على السلسلة","href":"Why-the-Chain-based-Design-Falls-Short","type":2,"isActive":false},{"label":"سلسلة اللغات 1.0: وكيل ReAct الكل في الكل","href":"LangChain-10-All-in-ReAct-Agent","type":2,"isActive":false},{"label":"كيف يعزز ميلفوس ذاكرة الوكيل","href":"How-Milvus-Enhances-Agent-Memory","type":2,"isActive":false},{"label":"LangChain مقابل LangGraph: كيفية اختيار النظام الذي يناسب وكلاءك","href":"LangChain-vs-LangGraph-How-to-Choose-the-One-That-Fits-for-Your-Agents","type":2,"isActive":false},{"label":"الخلاصة","href":"Conclusion","type":2,"isActive":false}]}
