-
-
Notifications
You must be signed in to change notification settings - Fork 122
feat(backend): implement centralized database service layer for health routes #184
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
84018e3
7e55a25
125eb05
baeaf46
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1 +1,34 @@ | ||
| .qodo | ||
| # Python | ||
| __pycache__/ | ||
| *.py[cod] | ||
| *$py.class | ||
| venv/ | ||
| env/ | ||
| .env | ||
|
|
||
| # Models (Extremely Important - Do not push 500MB+ files) | ||
| **/models/*.gguf | ||
| **/models/*.safetensors | ||
| **/models/*.bin | ||
| **/models/config.json | ||
| **/models/adapter_config.json | ||
|
|
||
| # Node / React Native | ||
| node_modules/ | ||
| .expo/ | ||
| dist/ | ||
| npm-debug.log* | ||
| yarn-debug.log* | ||
| yarn-error.log* | ||
|
|
||
| # Mac / OS specific | ||
| .DS_Store | ||
| .DS_Store? | ||
| ._* | ||
| .Spotlight-V100 | ||
| .Trashes | ||
|
|
||
| # Debugging | ||
| ocr_debug.txt | ||
| *.logBackend/db/chromadb/chroma.sqlite3 | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -81,44 +81,28 @@ def parse_appointment_command(query: str): | |
| } | ||
|
|
||
| def parse_date(date_str): | ||
| """Parse date string to ISO format.""" | ||
| if not date_str: | ||
| return None | ||
|
|
||
| if not date_str: return None | ||
| today = datetime.now() | ||
| date_str_lower = date_str.lower() | ||
| date_str_lower = date_str.lower().strip() | ||
|
|
||
| if date_str_lower == 'today': | ||
| return today.strftime('%Y-%m-%d') | ||
| elif date_str_lower == 'tomorrow': | ||
| return (today + timedelta(days=1)).strftime('%Y-%m-%d') | ||
| elif date_str_lower == 'next week': | ||
| return (today + timedelta(days=7)).strftime('%Y-%m-%d') | ||
|
|
||
| # Try to parse as MM/DD or MM/DD/YYYY | ||
| try: | ||
| if '/' in date_str: | ||
| parts = date_str.split('/') | ||
| if len(parts) == 2: | ||
| month, day = int(parts[0]), int(parts[1]) | ||
| year = today.year | ||
| if month < today.month or (month == today.month and day < today.day): | ||
| year += 1 | ||
| return f"{year}-{month:02d}-{day:02d}" | ||
| elif len(parts) == 3: | ||
| month, day, year = int(parts[0]), int(parts[1]), int(parts[2]) | ||
| return f"{year}-{month:02d}-{day:02d}" | ||
| except: | ||
| pass | ||
| if date_str_lower == 'next month': | ||
| next_month = (today.month % 12) + 1 | ||
| year = today.year + (1 if today.month == 12 else 0) | ||
| return f"{year}-{next_month:02d}-01" | ||
|
|
||
| # Try to parse as YYYY-MM-DD | ||
| try: | ||
| datetime.strptime(date_str, '%Y-%m-%d') | ||
| return date_str | ||
| except: | ||
| pass | ||
| day_mapping = { | ||
| 'monday': 0, 'mon': 0, 'tuesday': 1, 'tue': 1, | ||
| 'wednesday': 2, 'wed': 2, 'thursday': 3, 'thu': 3, 'thurs': 3, | ||
| 'friday': 4, 'fri': 4, 'saturday': 5, 'sat': 5, 'sunday': 6, 'sun': 6, | ||
| } | ||
|
|
||
| if date_str_lower in day_mapping: | ||
| days_ahead = day_mapping[date_str_lower] - today.weekday() | ||
| if days_ahead <= 0: days_ahead += 7 | ||
| return (today + timedelta(days=days_ahead)).strftime('%Y-%m-%d') | ||
|
|
||
| return None | ||
| return date_str | ||
|
Comment on lines
83
to
+105
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

Missing handling for "today" and "tomorrow" keywords.

Proposed fix to add missing date handling:

def parse_date(date_str):
- if not date_str: return None
+ if not date_str:
+ return None
today = datetime.now()
date_str_lower = date_str.lower().strip()
+ if date_str_lower == 'today':
+ return today.strftime('%Y-%m-%d')
+
+ if date_str_lower == 'tomorrow':
+ return (today + timedelta(days=1)).strftime('%Y-%m-%d')
+
+ if date_str_lower == 'next week':
+ return (today + timedelta(weeks=1)).strftime('%Y-%m-%d')
if date_str_lower == 'next month':
next_month = (today.month % 12) + 1
year = today.year + (1 if today.month == 12 else 0)
return f"{year}-{next_month:02d}-01"
day_mapping = {
'monday': 0, 'mon': 0, 'tuesday': 1, 'tue': 1,
'wednesday': 2, 'wed': 2, 'thursday': 3, 'thu': 3, 'thurs': 3,
'friday': 4, 'fri': 4, 'saturday': 5, 'sat': 5, 'sunday': 6, 'sun': 6,
}
if date_str_lower in day_mapping:
days_ahead = day_mapping[date_str_lower] - today.weekday()
- if days_ahead <= 0: days_ahead += 7
+ if days_ahead <= 0:
+ days_ahead += 7
return (today + timedelta(days=days_ahead)).strftime('%Y-%m-%d')
    return date_str

Based on learnings: "Use the fine-tuned SLM model as the primary date extraction method. Implement a robust regex-based fallback to cover cases the model may miss."

🧰 Tools
🪛 Ruff (0.14.14)
84-84: Multiple statements on one line (colon) (E701)
102-102: Multiple statements on one line (colon) (E701)

🤖 Prompt for AI Agents |
||
|
|
||
| def parse_time(time_str): | ||
| """Parse time string to HH:MM format.""" | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,55 +1,30 @@ | ||
| from llama_cpp import Llama | ||
|
|
||
|
|
||
| llm = Llama( | ||
| model_path="./models/qwen2-0_5b-instruct-q4_k_m.gguf", | ||
| lora_path="./models/adapter_model.bin", | ||
| n_ctx=512, | ||
| n_gpu_layers=-1 | ||
| ) | ||
|
Comment on lines
+4
to
+9
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

Module-level model initialization blocks imports and uses inconsistent paths.

Consider lazy initialization (as done in app.py). Proposed lazy initialization pattern:

from llama_cpp import Llama
+import os
-llm = Llama(
- model_path="./models/qwen2-0_5b-instruct-q4_k_m.gguf",
- lora_path="./models/adapter_model.bin",
- n_ctx=512,
- n_gpu_layers=-1
-)
+_llm = None
+
+def _get_llm():
+ global _llm
+ if _llm is None:
+ base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "models")
+ _llm = Llama(
+ model_path=os.path.join(base_dir, "qwen2-0_5b-instruct-q4_k_m.gguf"),
+ lora_path=os.path.join(base_dir, "medical_adapter.gguf"), # Align with app.py
+ n_ctx=512,
+ n_gpu_layers=-1
+ )
+ return _llm
def run_llm(prompt: str) -> str:
"""Actual inference logic for medical extraction."""
+ llm = _get_llm()
output = llm(🤖 Prompt for AI Agents |
||
|
|
||
| def run_llm(prompt: str) -> str: | ||
| """ | ||
| Run LLM inference. | ||
|
|
||
| For the offline BabyNest app: | ||
| - This will be called from the frontend using Llama.rn | ||
| - The frontend will handle the actual LLM inference | ||
| - This function prepares the prompt for frontend processing | ||
|
|
||
| Args: | ||
| prompt: The formatted prompt with user context and query | ||
|
|
||
| Returns: | ||
| str: LLM response (will be replaced by frontend Llama.rn call) | ||
| """ | ||
| # TODO: Replace with frontend Llama.rn integration | ||
| # For now, return a structured response based on the prompt content | ||
|
|
||
| if "weight" in prompt.lower(): | ||
| return """Based on your weight tracking data, you're showing a healthy pattern. | ||
| Your weight gain is within normal ranges for pregnancy. Continue monitoring weekly | ||
| and consult your healthcare provider if you notice any sudden changes.""" | ||
|
|
||
| elif "appointment" in prompt.lower(): | ||
| return """I can help you manage your appointments. Based on your current week, | ||
| you should focus on regular prenatal checkups. Would you like me to suggest | ||
| optimal scheduling times or help reschedule any missed appointments?""" | ||
|
|
||
| elif "symptoms" in prompt.lower(): | ||
| return """I see you're tracking various symptoms. This is normal during pregnancy. | ||
| Continue monitoring and report any concerning symptoms to your healthcare provider. | ||
| Your tracking data helps identify patterns that may need attention.""" | ||
|
|
||
| else: | ||
| return """I'm here to support your pregnancy journey! Based on your current week | ||
| and tracking data, you're doing well. Remember to stay hydrated, get adequate rest, | ||
| and maintain regular prenatal care. Is there anything specific you'd like to know | ||
| about your current pregnancy week?""" | ||
| """Actual inference logic for medical extraction.""" | ||
| output = llm( | ||
| prompt, | ||
| max_tokens=256, | ||
| stop=["}"], | ||
| temperature=0 | ||
| ) | ||
| response = output['choices'][0]['text'].strip() | ||
| # Ensuring valid JSON structure | ||
| return response + "}" if not response.endswith("}") else response | ||
|
Comment on lines
+19
to
+21
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

Unsafe access to LLM output structure. Accessing output['choices'][0]['text'] directly can fail if the response is missing or empty.

Defensive access pattern:

- response = output['choices'][0]['text'].strip()
+ choices = output.get('choices', [])
+ if not choices:
+ return "{}"
+    response = choices[0].get('text', '').strip()
🤖 Prompt for AI Agents |
||
|
|
||
| def prepare_prompt_for_frontend(prompt: str) -> dict: | ||
| """ | ||
| Prepare prompt for frontend Llama.rn processing. | ||
|
|
||
| Args: | ||
| prompt: The formatted prompt | ||
|
|
||
| Returns: | ||
| dict: Structured data for frontend LLM processing | ||
| """ | ||
| """Prepare prompt for future frontend Llama.rn processing.""" | ||
| return { | ||
| "prompt": prompt, | ||
| "max_tokens": 500, | ||
| "temperature": 0.7, | ||
| "system_message": "You are BabyNest, an empathetic pregnancy companion providing personalized, evidence-based guidance." | ||
| } | ||
| "max_tokens": 150, | ||
| "temperature": 0.1, | ||
| "system_message": "You are BabyNest, an empathetic pregnancy companion. Extract medical data into JSON." | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Fix the concatenated ignore pattern.
Line 34 appears to merge two patterns into one, which won’t match either
.log files or the sqlite path. Split into two lines.

🛠️ Proposed fix
📝 Committable suggestion
🤖 Prompt for AI Agents