Skip to content
Open

jj #13

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file modified fastapi-backend/__pycache__/app.cpython-311.pyc
Binary file not shown.
Binary file modified fastapi-backend/__pycache__/keys.cpython-311.pyc
Binary file not shown.
Binary file modified fastapi-backend/__pycache__/models.cpython-311.pyc
Binary file not shown.
72 changes: 52 additions & 20 deletions fastapi-backend/deepgram_prompter.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,15 @@

from fastapi import HTTPException, APIRouter
import requests
import re
import json
from playsound import playsound
from keys import *
import os
from pydub import AudioSegment
from pydub.playback import play
from deprecated_gemini_prompter import *





router = APIRouter()
Expand All @@ -18,32 +24,58 @@
# In-memory conversation store
conversations = {}

def emphasize_keywords(text):
for word in KEY_MEDICAL_WORDS:
text = re.sub(rf'\b{word}\b', f'<emphasis>{word}</emphasis>', text, flags=re.IGNORECASE)
return text
# def emphasize_keywords(text):
# for word in KEY_MEDICAL_WORDS:
# text = re.sub(rf'\b{word}\b', f'<emphasis>{word}</emphasis>', text, flags=re.IGNORECASE)
# return text


def convert_text_to_speech(text: str) -> str:
ssml_text = f"<speak>{text}</speak>" # Wrap text in SSML



import requests
from playsound import playsound

# Your Deepgram API key
def play_audio(audio_file_path):
    # Thin wrapper over playsound so callers don't depend on the library directly.
    playsound(audio_file_path)
def text_to_speech(text):
    """Synthesize `text` with Deepgram's Aura TTS, save it locally, and play it.

    Returns:
        The path of the saved MP3 file on success, or None on failure.
    """
    url = "https://api.deepgram.com/v1/speak?model=aura-asteria-en"
    headers = {
        "Authorization": f"Token {deepgram_key}",
        "Content-Type": "application/json"
    }
    data = {
        "text": text
    }

    response = requests.post(url, headers=headers, json=data)

    if response.status_code == 200:
        audio_file_path = "response_audio.mp3"
        with open(audio_file_path, "wb") as f:
            f.write(response.content)
        playsound(audio_file_path)
        # Bug fix: callers (main() below, and gemini_fastapi) test the returned
        # path to decide success, but the success branch previously fell
        # through and implicitly returned None — return the saved path.
        return audio_file_path
    else:
        print("Error:", response.status_code, response.text)
        return None




def main():
    """Demo entry point: synthesize a fixed sentence and play the result."""
    prompt_text = "This is a test string that will be converted to audio."
    print("Generating audio for prompt:", prompt_text)

    # Synthesize; a falsy result means the Deepgram request failed.
    generated_path = text_to_speech(prompt_text)

    if not generated_path:
        print("Failed to generate audio.")
        return

    print(f"Playing audio from: {generated_path}")
    play_audio(generated_path)

if __name__ == "__main__":
    main()
79 changes: 46 additions & 33 deletions fastapi-backend/gemini_fastapi.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@
from typing import List, Optional
from keys import GEMINI_KEY
from models import *
from deepgram_prompter import convert_text_to_speech
from deepgram_prompter import text_to_speech
from playsound import playsound

# Configure the Google Gemini model
genai.configure(api_key=GEMINI_KEY)
Expand All @@ -13,23 +14,38 @@
router = APIRouter()

# Hard-coded demo inputs for the dispatcher conversation. This replaced the
# old `Inputs` Pydantic request model; the route handlers read these fields
# when building the Gemini prompt.
class base_inputs:
    # NOTE(review): these are class-level attributes, so the list defaults
    # (medical_conditions, allergies, medications) are shared by every
    # instance — only safe here because `inputs` below is a singleton that is
    # never mutated. Confirm before creating more instances.
    name: str = 'John'
    phone: str = '416-234-5837'
    medical_conditions: List[str] = ['asthma']
    allergies: Optional[List[str]] = ['peanuts']
    medications: Optional[List[str]] = ['inhaler']
    id: UUID = None
    age: int = 30
    gender: str = 'Male'
    emergency_contact: str = 'Jane'
    emergency_phone: str = '123-456-7890'
    medical_info: MedicalInfo = None
    patient_id: UUID = None
    emergency_type: List[str] = None
    location: str = None
    additional_info: Optional[str] = None

# Module-level singleton read by the route handlers below.
inputs = base_inputs()

# Shared Gemini chat session. begin_conversation is expected to assign it so
# continue_conversation can reuse it; until then it is None.
chat = None

# Route for beginning a conversation
@router.post("/begin-conversation/")
async def begin_conversation(inputs: Inputs, audiofile: UploadFile = File(...)):
@router.post("/begin_conversation/")
async def begin_conversation():
try:
audiofile = 'sample_1.mp3'
# Save the audio file to a temporary directory
audiofile_name = f"temp_mp3/{audiofile.filename}"
with open(audiofile_name, "wb") as buffer:
buffer.write(await audiofile.read())

audiofile_name = f"temp_mp3/{audiofile}"
# Upload the file to Google Gemini and start a conversation
myfile = genai.upload_file(audiofile_name)
prompt_test_data = f"""Imagine you are {inputs.patient.name}'s level-headed AI assistant whose purpose is to carry through a conversation
with a 911 dispatcher on behalf of {inputs.patient.name}. We are going to play a game where I am the dispatcher for 911.
prompt_test_data = f"""Imagine you are {inputs.name}'s level-headed AI assistant whose purpose is to carry through a conversation
with a 911 dispatcher on behalf of {inputs.name}. We are going to play a game where I am the dispatcher for 911.
For each prompt I will prompt you with updates on the current situation, and then in a voice audio file, I will pretend to be the dispatcher. You
will respond as if you are in a conversation with this dispatcher and be helpful and answer their questions.

Expand All @@ -39,15 +55,17 @@ async def begin_conversation(inputs: Inputs, audiofile: UploadFile = File(...)):
BE PASSIVE! Do NOT overload with information the DISPATCHER DOES NOT REQUEST! Answer WHAT THE DISPATCHER
ASKS FOR! DO NOT GIVE INFORMATION THAT THEY DON'T ASK FOR!

For context, {inputs.patient.name} is a {inputs.patient.age} year old {inputs.patient.gender}. Their medical conditions
include {inputs.patient.medical_info.medical_conditions}. He is allergic to {inputs.patient.medical_info.allergies} and for
medication he is on {inputs.patient.medical_info.medications}. We are in need of {inputs.dispatch_request.emergency_type}.
There are gunshots detected in the vacinity. {inputs.patient.name} has a heart rate of 120 bpm and a blood pressure of 130/80. The ADDRESS is
{inputs.dispatch_request.location}.
For context, {inputs.name} is a {inputs.age} year old {inputs.gender}. Their medical conditions
include {inputs.medical_conditions}. He is allergic to {inputs.allergies} and for
medication he is on {inputs.medications}. We are in need of {inputs.emergency_type}.
There are gunshots detected in the vacinity. {inputs.name} has a heart rate of 120 bpm and a blood pressure of 130/80. The ADDRESS is
{inputs.location}. {inputs.name}'s emergency contacts are {inputs.emergency_contact}, {inputs.emergency_phone}.



Remember, respond AS IF YOU ARE an assistant having a CONVERSATION with the DISPATCHER, ON BEHALF OF {inputs.patient.name}, and
Remember, respond AS IF YOU ARE an assistant having a CONVERSATION with the DISPATCHER, ON BEHALF OF {inputs.name}, and
whatever is in the AUDIO FILE is what the dispatcher has told you.
YOU ARE NOT {inputs.patient.name}! YOU ARE HIS AI ASSISTANT!
YOU ARE NOT {inputs.name}! YOU ARE HIS AI ASSISTANT!

Respond in Plain, unformatted text.
"""
Expand All @@ -56,38 +74,33 @@ async def begin_conversation(inputs: Inputs, audiofile: UploadFile = File(...)):
history=[{"role": "user", "parts": prompt_test_data}]
)
next_prompt_data = f"""The situation has updated. There are no changes in the situation. Remember, respond AS IF YOU ARE an assistant having a CONVERSATION with the DISPATCHER,
ON BEHALF OF {inputs.patient.name}, and the dispatcher has told you what is in the AUDIO FILE.
ON BEHALF OF {inputs.name}, and the dispatcher has told you what is in the AUDIO FILE.
BE PASSIVE! Do NOT overload with information the DISPATCHER DOES NOT REQUEST! Answer WHAT THE DISPATCHER
ASKS FOR! DO NOT GIVE INFORMATION THAT THEY DON'T ASK FOR! Respond in Plain, unformatted text."""
response = chat.send_message([myfile, next_prompt_data])

return {"response": response.text, "chat_id": id(chat)}
text_to_speech(response.text)
return {"response": response.text}

except Exception as e:
raise HTTPException(status_code=500, detail=str(e))

# Route for continuing a conversation
@router.post("/continue_conversation/")
async def continue_conversation():
    """Send the next (hard-coded) dispatcher audio clip to the ongoing Gemini
    chat and speak the model's reply aloud.

    Returns:
        {"response": <reply text>}. Any failure surfaces as HTTP 500.
    """
    try:
        # Demo clip standing in for live dispatcher audio.
        audiofile_name = "temp_mp3/sample_2.mp3"

        # NOTE(review): relies on the module-level `chat` started by
        # begin_conversation; if no conversation was begun, `chat` is still
        # None and send_message below raises (reported as a 500).
        myfile = genai.upload_file(audiofile_name)
        next_prompt_data = """The situation has updated. There are no changes in the situation. Remember, respond AS IF YOU ARE an assistant having a CONVERSATION with the DISPATCHER,
        ON BEHALF OF the user! And the dispatcher has told you what is in the AUDIO FILE.
        BE PASSIVE! Do NOT overload with information the DISPATCHER DOES NOT REQUEST! Answer WHAT THE DISPATCHER
        ASKS FOR! DO NOT GIVE INFORMATION THAT THEY DON'T ASK FOR! Respond in Plain, unformatted text."""
        response = chat.send_message([myfile, next_prompt_data])

        # Speak the reply; the saved-MP3 path it returns is not needed here.
        text_to_speech(response.text)

        return {"response": response.text}

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
Binary file removed fastapi-backend/output.mp3
Binary file not shown.
Binary file added fastapi-backend/response_audio.mp3
Binary file not shown.
5 changes: 4 additions & 1 deletion react-native-frontend/app/app/(tabs)/_layout.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ export default function TabLayout() {
// to prevent a hydration error in React Navigation v6.
headerShown: useClientOnlyValue(false, true),
}}>
{/* First tab: Home (index) */}
<Tabs.Screen
name="index"
options={{
Expand All @@ -49,10 +50,12 @@ export default function TabLayout() {
),
}}
/>

{/* Second tab: My Information */}
<Tabs.Screen
name="user-information"
options={{
title: 'User Information',
title: 'my information',
tabBarIcon: ({ color }) => <TabBarIcon name="gears" color={color} />,
}}
/>
Expand Down
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
100 changes: 100 additions & 0 deletions react-native-frontend/app/app/(tabs)/emergency-responder.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
import React, { useState } from 'react';
import { StyleSheet, Button, View, Image} from 'react-native';
import axios from 'axios';
import { Text } from '@/components/Themed';
export default function TabTwoScreen() {
  // false -> next tap starts the conversation; true -> next tap continues it.
  // Toggled after every request (including failures), matching the original flow.
  const [isBegin, setIsBegin] = useState(false);
  const [loading, setLoading] = useState(false);
  // Typed explicitly so setError('...') type-checks; null means "no error shown".
  const [error, setError] = useState<string | null>(null);

  // POST to the backend, alternating between the begin/continue endpoints.
  const callApi = async () => {
    // Bug fix: the loading/error reset previously ran *after* the awaited
    // request completed, leaving `loading` stuck at true (button disabled
    // forever). Reset state before firing the request instead.
    setLoading(true);
    setError(null);

    const endpoint = isBegin
      ? 'http://127.0.0.1:8000/continue_conversation/'
      : 'http://127.0.0.1:8000/begin_conversation/';

    try {
      const response = await axios.post(endpoint);
      console.log('Response:', response.data);
      // Optionally handle successful response (e.g., navigate or show a message)
    } catch (err) {
      console.error('Error calling API:', err);
      setError('Failed to create patient.');
    } finally {
      setLoading(false);
      // Alternate begin -> continue -> begin ...
      setIsBegin(!isBegin);
    }
  };

  return (
    <View style={styles.container}>
      <Image
        source={require('./emergency dispatch responder.png')} // Ensure the path is correct
        style={styles.text}
      />
      {/* `Color` is not a valid View prop — the separator's look comes from styles. */}
      <View style={styles.separator} />
      {error && <Text style={styles.error}>{error}</Text>}
      <Image
        source={require('../../assets/images/emergencybubble.png')} // Path to the local image
        style={styles.image}
      />
      <Button
        title={loading ? "Loading..." : "Simulate dispatch"}
        onPress={callApi}
        disabled={loading}
      />
    </View>
  );
}

const styles = StyleSheet.create({
  container: {
    flex: 1,
    alignItems: 'center',
    justifyContent: 'center',
    backgroundColor: 'white',
  },
  // Header image (negative margins pull it over the surrounding whitespace).
  text: {
    width: 350,
    height: 350,
    alignSelf: 'center',
    resizeMode: 'contain',
    marginBottom: -220,
    marginTop: -100,
  },
  title: {
    fontSize: 20,
    fontWeight: 'bold',
    color: 'black',
    fontFamily: 'Newsreader',
  },
  separator: {
    marginVertical: 30,
    height: 1,
    width: '80%',
    // Bug fix: `color` is not a View style property, so the 1px divider
    // rendered invisible — `backgroundColor` is what actually paints it.
    backgroundColor: 'black',
  },
  error: {
    color: 'red',
    marginBottom: 10,
  },
  image: {
    height: 300,
    width: 300,
    resizeMode: 'contain',
  },
});
Loading