Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 33 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1 +1,34 @@
.qodo
# Python
__pycache__/
*.py[cod]
*$py.class
venv/
env/
.env

# Models (Extremely Important - Do not push 500MB+ files)
**/models/*.gguf
**/models/*.safetensors
**/models/*.bin
**/models/config.json
**/models/adapter_config.json

# Node / React Native
node_modules/
.expo/
dist/
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Mac / OS specific
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes

# Debugging
ocr_debug.txt
*.log
Backend/db/chromadb/chroma.sqlite3
Comment on lines +32 to +34
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Malformed gitignore entry — missing newline.

Line 34 concatenates two patterns: *.log and Backend/db/chromadb/chroma.sqlite3. Neither pattern will match correctly.

Proposed fix
 # Debugging
 ocr_debug.txt
-*.logBackend/db/chromadb/chroma.sqlite3
+*.log
+Backend/db/chromadb/chroma.sqlite3
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
# Debugging
ocr_debug.txt
*.logBackend/db/chromadb/chroma.sqlite3
# Debugging
ocr_debug.txt
*.log
Backend/db/chromadb/chroma.sqlite3
🤖 Prompt for AI Agents
In @.gitignore around lines 32 - 34, The .gitignore has a malformed entry where
the patterns "*.log" and "Backend/db/chromadb/chroma.sqlite3" are concatenated
on one line; split them into two separate lines so each pattern is on its own
line (ensure "*.log" is one line and "Backend/db/chromadb/chroma.sqlite3" is the
next) so both patterns match as intended; verify the surrounding entries like
"ocr_debug.txt" remain unchanged.

50 changes: 17 additions & 33 deletions Backend/agent/handlers/appointment.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,44 +81,28 @@ def parse_appointment_command(query: str):
}

def parse_date(date_str):
"""Parse date string to ISO format."""
if not date_str:
return None

if not date_str: return None
today = datetime.now()
date_str_lower = date_str.lower()
date_str_lower = date_str.lower().strip()

if date_str_lower == 'today':
return today.strftime('%Y-%m-%d')
elif date_str_lower == 'tomorrow':
return (today + timedelta(days=1)).strftime('%Y-%m-%d')
elif date_str_lower == 'next week':
return (today + timedelta(days=7)).strftime('%Y-%m-%d')

# Try to parse as MM/DD or MM/DD/YYYY
try:
if '/' in date_str:
parts = date_str.split('/')
if len(parts) == 2:
month, day = int(parts[0]), int(parts[1])
year = today.year
if month < today.month or (month == today.month and day < today.day):
year += 1
return f"{year}-{month:02d}-{day:02d}"
elif len(parts) == 3:
month, day, year = int(parts[0]), int(parts[1]), int(parts[2])
return f"{year}-{month:02d}-{day:02d}"
except:
pass
if date_str_lower == 'next month':
next_month = (today.month % 12) + 1
year = today.year + (1 if today.month == 12 else 0)
return f"{year}-{next_month:02d}-01"

# Try to parse as YYYY-MM-DD
try:
datetime.strptime(date_str, '%Y-%m-%d')
return date_str
except:
pass
day_mapping = {
'monday': 0, 'mon': 0, 'tuesday': 1, 'tue': 1,
'wednesday': 2, 'wed': 2, 'thursday': 3, 'thu': 3, 'thurs': 3,
'friday': 4, 'fri': 4, 'saturday': 5, 'sat': 5, 'sunday': 6, 'sun': 6,
}

if date_str_lower in day_mapping:
days_ahead = day_mapping[date_str_lower] - today.weekday()
if days_ahead <= 0: days_ahead += 7
return (today + timedelta(days=days_ahead)).strftime('%Y-%m-%d')

return None
return date_str
Comment on lines 83 to +105
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Missing "today", "tomorrow", and "next week" parsing — regression risk.

The date_patterns regex (lines 39-41) still matches today, tomorrow, and next week, but parse_date no longer handles them. These inputs will pass through unchanged and be stored as raw strings (e.g., "tomorrow") instead of canonical dates.

Based on learnings, ensure parse_date normalizes all date strings the model may produce.

Proposed fix to restore common date handling
 def parse_date(date_str):
     if not date_str: return None
     today = datetime.now()
     date_str_lower = date_str.lower().strip()
     
+    if date_str_lower == 'today':
+        return today.strftime('%Y-%m-%d')
+    
+    if date_str_lower == 'tomorrow':
+        return (today + timedelta(days=1)).strftime('%Y-%m-%d')
+    
+    if date_str_lower == 'next week':
+        return (today + timedelta(days=7)).strftime('%Y-%m-%d')
     
     if date_str_lower == 'next month':
         next_month = (today.month % 12) + 1
         year = today.year + (1 if today.month == 12 else 0)
         return f"{year}-{next_month:02d}-01"
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
def parse_date(date_str):
"""Parse date string to ISO format."""
if not date_str:
return None
if not date_str: return None
today = datetime.now()
date_str_lower = date_str.lower()
date_str_lower = date_str.lower().strip()
if date_str_lower == 'today':
return today.strftime('%Y-%m-%d')
elif date_str_lower == 'tomorrow':
return (today + timedelta(days=1)).strftime('%Y-%m-%d')
elif date_str_lower == 'next week':
return (today + timedelta(days=7)).strftime('%Y-%m-%d')
# Try to parse as MM/DD or MM/DD/YYYY
try:
if '/' in date_str:
parts = date_str.split('/')
if len(parts) == 2:
month, day = int(parts[0]), int(parts[1])
year = today.year
if month < today.month or (month == today.month and day < today.day):
year += 1
return f"{year}-{month:02d}-{day:02d}"
elif len(parts) == 3:
month, day, year = int(parts[0]), int(parts[1]), int(parts[2])
return f"{year}-{month:02d}-{day:02d}"
except:
pass
if date_str_lower == 'next month':
next_month = (today.month % 12) + 1
year = today.year + (1 if today.month == 12 else 0)
return f"{year}-{next_month:02d}-01"
# Try to parse as YYYY-MM-DD
try:
datetime.strptime(date_str, '%Y-%m-%d')
return date_str
except:
pass
day_mapping = {
'monday': 0, 'mon': 0, 'tuesday': 1, 'tue': 1,
'wednesday': 2, 'wed': 2, 'thursday': 3, 'thu': 3, 'thurs': 3,
'friday': 4, 'fri': 4, 'saturday': 5, 'sat': 5, 'sunday': 6, 'sun': 6,
}
if date_str_lower in day_mapping:
days_ahead = day_mapping[date_str_lower] - today.weekday()
if days_ahead <= 0: days_ahead += 7
return (today + timedelta(days=days_ahead)).strftime('%Y-%m-%d')
return None
return date_str
def parse_date(date_str):
    """Normalize a natural-language date string to ISO ``YYYY-MM-DD``.

    Handles relative terms ('today', 'tomorrow', 'next week', 'next month')
    and weekday names/abbreviations (resolved to the next future occurrence).
    Any unrecognized string (e.g. an already-ISO date) is returned unchanged
    so the caller can decide how to handle it.

    Args:
        date_str: Raw date text extracted from the user's command, or None.

    Returns:
        An ISO date string, the original string if unrecognized, or None
        for empty input.
    """
    if not date_str:
        return None
    today = datetime.now()
    date_str_lower = date_str.lower().strip()

    # Relative terms the upstream extraction regex is known to produce;
    # they must be canonicalized here or raw strings like "tomorrow"
    # end up stored in the database.
    if date_str_lower == 'today':
        return today.strftime('%Y-%m-%d')
    if date_str_lower == 'tomorrow':
        return (today + timedelta(days=1)).strftime('%Y-%m-%d')
    if date_str_lower == 'next week':
        return (today + timedelta(days=7)).strftime('%Y-%m-%d')
    if date_str_lower == 'next month':
        # First day of the following month (December rolls over to January).
        next_month = (today.month % 12) + 1
        year = today.year + (1 if today.month == 12 else 0)
        return f"{year}-{next_month:02d}-01"

    day_mapping = {
        'monday': 0, 'mon': 0, 'tuesday': 1, 'tue': 1,
        'wednesday': 2, 'wed': 2, 'thursday': 3, 'thu': 3, 'thurs': 3,
        'friday': 4, 'fri': 4, 'saturday': 5, 'sat': 5, 'sunday': 6, 'sun': 6,
    }
    if date_str_lower in day_mapping:
        # Next occurrence of the named weekday, never today itself.
        days_ahead = day_mapping[date_str_lower] - today.weekday()
        if days_ahead <= 0:
            days_ahead += 7
        return (today + timedelta(days=days_ahead)).strftime('%Y-%m-%d')

    # Unrecognized format (e.g. already 'YYYY-MM-DD') -- pass through unchanged.
    return date_str
🧰 Tools
🪛 Ruff (0.14.13)

84-84: Multiple statements on one line (colon)

(E701)


102-102: Multiple statements on one line (colon)

(E701)

🤖 Prompt for AI Agents
In `@Backend/agent/handlers/appointment.py` around lines 83 - 105, The parse_date
function no longer normalizes common relative terms ("today", "tomorrow", "next
week"), causing those strings to be stored raw; update parse_date to detect
(case-insensitive) "today" -> return today's date in '%Y-%m-%d', "tomorrow" ->
today + 1 day, and "next week" -> today + 7 days (use the existing
datetime/timedelta imports and the same strftime formatting), keeping the
existing handling for weekdays and "next month"; ensure the checks occur before
falling back to returning the original string so all terms matched by the
date_patterns regex are converted to canonical ISO dates.


def parse_time(time_str):
"""Parse time string to HH:MM format."""
Expand Down
75 changes: 25 additions & 50 deletions Backend/agent/llm.py
Original file line number Diff line number Diff line change
@@ -1,55 +1,30 @@
from llama_cpp import Llama


# Module-level model load: runs at import time, so missing model files will
# crash the import, and the relative "./models/..." paths depend on the
# process working directory.
# NOTE(review): consider lazy initialization with absolute paths -- TODO confirm.
llm = Llama(
    model_path="./models/qwen2-0_5b-instruct-q4_k_m.gguf",  # quantized base model (GGUF)
    lora_path="./models/adapter_model.bin",  # fine-tuned LoRA adapter weights
    n_ctx=512,  # context window size in tokens
    n_gpu_layers=-1  # presumably offloads all layers to GPU -- verify llama_cpp semantics
)
Comment on lines +4 to +9
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Global model instantiation risks startup failure and blocks import.

Loading the model at module import time has several issues:

  1. No error handling — if model files are missing, the import crashes the entire application
  2. Blocks startup — model loading is synchronous and potentially slow
  3. Relative paths"./models/..." depends on the current working directory, which is fragile

Consider lazy initialization with error handling:

Proposed refactor with lazy loading
 from llama_cpp import Llama
+import os

-
-llm = Llama(
-    model_path="./models/qwen2-0_5b-instruct-q4_k_m.gguf", 
-    lora_path="./models/adapter_model.bin",               
-    n_ctx=512,                                            
-    n_gpu_layers=-1                                       
-)
+_llm = None
+
+def get_llm():
+    global _llm
+    if _llm is None:
+        base_dir = os.path.dirname(os.path.abspath(__file__))
+        model_path = os.path.join(base_dir, "..", "models", "qwen2-0_5b-instruct-q4_k_m.gguf")
+        lora_path = os.path.join(base_dir, "..", "models", "adapter_model.bin")
+        
+        if not os.path.exists(model_path):
+            raise FileNotFoundError(f"Model not found: {model_path}")
+        
+        _llm = Llama(
+            model_path=model_path,
+            lora_path=lora_path if os.path.exists(lora_path) else None,
+            n_ctx=512,
+            n_gpu_layers=-1
+        )
+    return _llm
🤖 Prompt for AI Agents
In `@Backend/agent/llm.py` around lines 4 - 9, The module currently creates a
global Llama instance (llm) at import, which can crash or block startup; change
to lazy initialization: remove the top-level llm instantiation and implement a
get_llm() (or init_llm()) function that resolves model paths using
Path(__file__).resolve().parent / "models" for model_path and lora_path,
performs the Llama(...) creation inside a try/except, logs and returns None or
raises a clear error on failure, and caches the created instance for subsequent
calls to avoid repeated loads; optionally make the initializer callable from
async code or kick off loading in a background thread if non-blocking startup is
required.


def run_llm(prompt: str) -> str:
"""
Run LLM inference.

For the offline BabyNest app:
- This will be called from the frontend using Llama.rn
- The frontend will handle the actual LLM inference
- This function prepares the prompt for frontend processing

Args:
prompt: The formatted prompt with user context and query

Returns:
str: LLM response (will be replaced by frontend Llama.rn call)
"""
# TODO: Replace with frontend Llama.rn integration
# For now, return a structured response based on the prompt content

if "weight" in prompt.lower():
return """Based on your weight tracking data, you're showing a healthy pattern.
Your weight gain is within normal ranges for pregnancy. Continue monitoring weekly
and consult your healthcare provider if you notice any sudden changes."""

elif "appointment" in prompt.lower():
return """I can help you manage your appointments. Based on your current week,
you should focus on regular prenatal checkups. Would you like me to suggest
optimal scheduling times or help reschedule any missed appointments?"""

elif "symptoms" in prompt.lower():
return """I see you're tracking various symptoms. This is normal during pregnancy.
Continue monitoring and report any concerning symptoms to your healthcare provider.
Your tracking data helps identify patterns that may need attention."""

else:
return """I'm here to support your pregnancy journey! Based on your current week
and tracking data, you're doing well. Remember to stay hydrated, get adequate rest,
and maintain regular prenatal care. Is there anything specific you'd like to know
about your current pregnancy week?"""
"""Actual inference logic for medical extraction."""
output = llm(
prompt,
max_tokens=256,
stop=["}"],
temperature=0
)
response = output['choices'][0]['text'].strip()
# Ensuring valid JSON structure
return response + "}" if not response.endswith("}") else response

def prepare_prompt_for_frontend(prompt: str) -> dict:
"""
Prepare prompt for frontend Llama.rn processing.

Args:
prompt: The formatted prompt

Returns:
dict: Structured data for frontend LLM processing
"""
"""Prepare prompt for future frontend Llama.rn processing."""
return {
"prompt": prompt,
"max_tokens": 500,
"temperature": 0.7,
"system_message": "You are BabyNest, an empathetic pregnancy companion providing personalized, evidence-based guidance."
}
"max_tokens": 150,
"temperature": 0.1,
"system_message": "You are BabyNest, an empathetic pregnancy companion. Extract medical data into JSON."
}
Loading