-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathapi_server.py
More file actions
102 lines (82 loc) · 2.71 KB
/
api_server.py
File metadata and controls
102 lines (82 loc) · 2.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.responses import StreamingResponse
import sqlite3
import requests
import json
# 🔄 Import your custom LLaMA SQL logic
from llama_sql_generator import generate_sql_with_llama, clean_sql, get_all_table_columns
app = FastAPI()  # Application instance; the /ask and /stream routes below attach to it.
# 🎯 Request model
class QuestionRequest(BaseModel):
    """Request body shared by /ask and /stream: one free-text question."""

    question: str
# ✅ /ask endpoint — for SQL generation + execution
@app.post("/ask")
def ask_question(request: QuestionRequest):
    """Generate SQL from a natural-language question, execute it, return rows.

    Only read-only queries (starting with SELECT or WITH) are executed.
    Failures are reported in-band: the endpoint returns a dict with an
    "error" key (plus the generated SQL, when available) instead of raising,
    so callers always get HTTP 200 with a JSON body.
    """
    conn = None        # initialized up front so `finally` can close safely
    sql_query = None   # kept for the error payload if generation succeeds but execution fails
    try:
        # 1. Generate SQL from question
        raw_sql = generate_sql_with_llama(request.question)
        sql_query = clean_sql(raw_sql)

        # 2. Allow only SELECT or WITH queries (read-only guard on LLM output)
        if not sql_query.strip().lower().startswith(("select", "with")):
            raise ValueError("Only SELECT queries are allowed.")

        # 3. Connect to SQLite DB
        conn = sqlite3.connect('ecommerce.db')
        cursor = conn.cursor()

        # 4. Run SQL
        cursor.execute(sql_query)
        rows = cursor.fetchall()

        # 5. Format results as a list of {column: value} dicts
        columns = [desc[0] for desc in cursor.description]
        result = [dict(zip(columns, row)) for row in rows]
        return {
            "question": request.question,
            "sql_query": sql_query,
            "result": result
        }
    except Exception as e:
        # In-band error report; include the SQL (if any was produced) for debugging.
        return {
            "error": f"{type(e).__name__}: {str(e)}",
            "generated_sql": sql_query if sql_query is not None else "N/A"
        }
    finally:
        # Close only if the connection was actually opened — replaces the
        # original bare `except:` that could mask unrelated close() errors.
        if conn is not None:
            conn.close()
# ✅ Streaming helper (token-by-token from Ollama)
def stream_llama_response(prompt: str):
    """Return a generator yielding response tokens from a local Ollama server.

    Sends *prompt* to the llama3 model with streaming enabled and yields the
    "response" field of each JSON line as it arrives. Lines that fail to
    decode or parse are skipped so one malformed chunk does not kill the
    stream.
    """
    url = "http://localhost:11434/api/generate"
    payload = {
        "model": "llama3",
        "prompt": prompt,
        "stream": True
    }
    # NOTE(review): no timeout is set, so an unreachable Ollama server will
    # hang this request indefinitely — consider timeout=(connect, read).
    response = requests.post(url, json=payload, stream=True)

    def event_stream():
        try:
            for line in response.iter_lines():
                if not line:
                    continue
                try:
                    json_data = json.loads(line.decode("utf-8"))
                except (UnicodeDecodeError, json.JSONDecodeError):
                    # Skip malformed/partial lines instead of aborting;
                    # narrowed from the original bare `except:`.
                    continue
                yield json_data.get("response", "")
        finally:
            # Release the underlying HTTP connection once the consumer
            # finishes or disconnects (the original never closed it).
            response.close()

    return event_stream()
# ✅ /stream endpoint — for typing-style natural language answer
@app.post("/stream")
def stream_question(request: QuestionRequest):
    """Stream a plain-text, natural-language answer to the question.

    Builds a prompt that embeds the database schema (so the model can ground
    its answer), then proxies the token stream from the local LLaMA model
    straight back to the client. On any failure a JSON body with an "error"
    key is returned instead of a stream.
    """
    try:
        # Give the model the table layout before asking the question.
        db_schema = get_all_table_columns()
        llm_prompt = (
            "You are a helpful assistant. Answer the user's question clearly.\n"
            "Database Schema:\n"
            f"{db_schema}\n"
            f"Question: {request.question}\n"
            "Answer:"
        )
        return StreamingResponse(
            stream_llama_response(llm_prompt),
            media_type="text/plain"
        )
    except Exception as e:
        return {"error": str(e)}