Skip to content

Commit 9d308a6

Browse files
committed
fix(config): Add some missing config settings to the config schema
1 parent 6923925 commit 9d308a6

File tree

5 files changed

+25
-7
lines changed

5 files changed

+25
-7
lines changed

env_template

+1-1
Original file line numberDiff line numberDiff line change
@@ -43,4 +43,4 @@ AUTH_URL="http://0.0.0.0:4000/key/info"
4343
# LANGSMITH_API_KEY="<<<YOUR_LANGSMITH_API_KEY_HERE>>>
4444

4545
# User agent for the crawler.
46-
USER_AGENT="Moodle Research Wiki-RAG Crawler/{version} (https://github.com/moodlehq/wiwki-rag)"
46+
USER_AGENT="Moodle Research Wiki-RAG Crawler/{version} (https://github.com/moodlehq/wiki-rag)"

wiki_rag/search/main.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525

2626

2727
async def run():
28-
"""Make an index from the json information present in the specified file."""
28+
"""Perform a search with all the configuration in place."""
2929
setup_logging(level=LOG_LEVEL)
3030
logger = logging.getLogger(__name__)
3131
logger.info("wiki_rag-search starting up...")
@@ -136,6 +136,7 @@ async def run():
136136
stream=stream,
137137
wrapper_chat_max_turns=0,
138138
wrapper_chat_max_tokens=0,
139+
wrapper_model_name=llm_model,
139140
).items()
140141

141142
# Prepare the configuration.

wiki_rag/search/util.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -47,8 +47,8 @@ class ConfigSchema(TypedDict):
4747
llm_model: str
4848
search_distance_cutoff: float
4949
max_completion_tokens: int
50-
top_p: float
5150
temperature: float
51+
top_p: float
5252
stream: bool
5353
wrapper_chat_max_turns: int
5454
wrapper_chat_max_tokens: int

wiki_rag/server/main.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222

2323

2424
def main():
25-
"""Make an index from the json information present in the specified file."""
25+
"""Run the OpenAI server with all the configuration in place."""
2626
setup_logging(level=LOG_LEVEL)
2727
logger = logging.getLogger(__name__)
2828
logger.info("wiki_rag-server starting up...")

wiki_rag/server/server.py

+20-3
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,12 @@
88
import time
99
import uuid
1010

11+
from typing import Any
12+
1113
from fastapi import Depends, FastAPI, HTTPException
1214
from fastapi.responses import StreamingResponse
1315
from langchain_core.messages import AIMessageChunk, BaseMessage
16+
from langchain_core.runnables import RunnableConfig
1417

1518
from wiki_rag import __version__, server
1619
from wiki_rag.server.util import (
@@ -182,9 +185,10 @@ async def open_ai_langgraph_stream(question: str, history: list[BaseMessage]):
182185
return StreamingResponse(open_ai_langgraph_stream(question, history), media_type="text/event-stream")
183186
else:
184187
logger.info("Running the search (non-streaming)")
185-
completion = await server.graph.ainvoke(
186-
{"question": question, "history": history},
187-
config=server.config
188+
completion = await invoke_graph(
189+
question=question,
190+
history=history,
191+
config=server.config,
188192
)
189193

190194
return ChatCompletionResponse(
@@ -199,3 +203,16 @@ async def open_ai_langgraph_stream(question: str, history: list[BaseMessage]):
199203
),
200204
],
201205
)
206+
207+
208+
async def invoke_graph(
209+
question: str,
210+
history: list[BaseMessage],
211+
config: RunnableConfig,
212+
) -> Any:
213+
"""Invoke the graph with the given question, history and configuration. No streaming."""
214+
assert server.graph is not None
215+
return await server.graph.ainvoke(
216+
{"question": question, "history": history},
217+
config=config
218+
)

0 commit comments

Comments (0)