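"""JARVIS-style multi-modal voice assistant.

Pipeline, as wired up below: speech_recognition listens in the background for
the wake word 'jarvis'; faster-whisper transcribes the captured audio locally;
a Groq-hosted LLaMA 3 model routes the request (clipboard / screenshot /
webcam / none) and generates the reply; Gemini 1.5 Flash converts any captured
image into text context; Amazon Polly speaks the response.
"""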

from groq import Groq
from PIL import ImageGrab, Image
from openai import OpenAI
from os import system
from faster_whisper import WhisperModel
import speech_recognition as sr
import google.generativeai as genai
import pyperclip
import cv2
import pyaudio  # not used directly, but speech_recognition's Microphone needs PyAudio installed
import os
import time
import re
import sys

# Amazon Polly synthesis and local audio playback
import numpy as np
import sounddevice as sd
import boto3
from pydub import playback
import pydub

wake_word = 'jarvis'

# API keys are read from environment variables so no secrets live in the source.
# GROQ_API_KEY and GOOGLE_API_KEY are the variable names assumed here; set them before running.
groq_client = Groq(api_key=os.environ.get('GROQ_API_KEY'))
genai.configure(api_key=os.environ.get('GOOGLE_API_KEY'))
# openai_client = OpenAI(api_key=os.environ.get('OPENAI_API_KEY'))  # optional, currently unused

web_cam = cv2.VideoCapture(0)
r = sr.Recognizer()
source = sr.Microphone()
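
# System prompt that steers every chat completion.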
sys_msg = (
    'You are a multi-modal AI voice assistant. Your user may or may not have attached a photo for context '
    '(either a screenshot or a webcam capture). Any photo has already been processed into a highly detailed '
    'text prompt that will be attached to their transcribed voice prompt. Generate the most useful and '
    'factual response possible, carefully considering all previously generated text in your response before '
    'adding new tokens to the response. Do not expect or request images, just use the context if added. '
    'Use all of the context of this conversation so your response is relevant to the conversation. Make '
    'your responses clear and concise, avoiding any verbosity.'
)

convo = [{'role': 'system', 'content': sys_msg}]

generation_config = {
    'temperature': 0.7,
    'top_p': 1,
    'top_k': 1,
    'max_output_tokens': 512
}

safety_settings = [
    {'category': 'HARM_CATEGORY_HARASSMENT', 'threshold': 'BLOCK_NONE'},
    {'category': 'HARM_CATEGORY_HATE_SPEECH', 'threshold': 'BLOCK_NONE'},
    {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'threshold': 'BLOCK_NONE'},
    {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'threshold': 'BLOCK_NONE'},
]

model = genai.GenerativeModel('gemini-1.5-flash-latest',
                              generation_config=generation_config,
                              safety_settings=safety_settings)
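
# Local speech-to-text: faster-whisper, int8-quantized so it runs comfortably on CPU.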
num_cores = os.cpu_count()
whisper_size = 'base'
whisper_model = WhisperModel(
    whisper_size,
    device='cpu',
    compute_type='int8',
    cpu_threads=num_cores // 2,
    num_workers=num_cores // 2
)

# Function to synthesize speech using Amazon Polly
def synthesize_speech(text, output_filename):
    polly = boto3.client('polly', region_name='us-east-1')
    response = polly.synthesize_speech(
        VoiceId='Ruth',
        OutputFormat='mp3',
        Text=text,
        Engine='neural'
    )
    with open(output_filename, 'wb') as f:
        f.write(response['AudioStream'].read())

# Function to play audio file
def play_audio(file):
    sound = pydub.AudioSegment.from_file(file, format="mp3")
    playback.play(sound)
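
# Send the (optionally image-augmented) prompt to LLaMA 3 on Groq, keeping the running history in `convo`.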
def groq_prompt(prompt, img_context):
    if img_context:
        prompt = f'USER PROMPT: {prompt}\n\nIMAGE CONTEXT: {img_context}'
    convo.append({'role': 'user', 'content': prompt})
    chat_completion = groq_client.chat.completions.create(messages=convo, model='llama3-70b-8192')
    response = chat_completion.choices[0].message
    # Store the reply as a plain dict so the history stays a uniform list of role/content messages.
    convo.append({'role': 'assistant', 'content': response.content})
    return response.content
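
# Lightweight LLM router: decide which helper action (if any) should run before answering.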
def function_call(prompt):
    sys_msg = (
        'You are an AI function-calling model. You will determine whether extracting the user\'s clipboard content, '
        'taking a screenshot, capturing the webcam, or calling no functions is best for a voice assistant to respond '
        'to the user\'s prompt. The webcam can be assumed to be a normal laptop webcam facing the user. You will '
        'respond with only one selection from this list: ["extract clipboard", "take screenshot", "capture webcam", "None"] \n'
        'Do not respond with anything but the most logical selection from that list with no explanations. Format the '
        'function call name exactly as I listed.'
    )
    function_convo = [{'role': 'system', 'content': sys_msg},
                      {'role': 'user', 'content': prompt}]
    chat_completion = groq_client.chat.completions.create(messages=function_convo, model='llama3-70b-8192')
    response = chat_completion.choices[0].message
    return response.content
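
# Grab the full screen and save a small, heavily compressed JPEG for the vision model.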
def take_screenshot():
    path = 'screenshot.jpg'
    screenshot = ImageGrab.grab()
    rgb_screenshot = screenshot.convert('RGB')
    rgb_screenshot.save(path, quality=15)
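
# Capture a single frame from the webcam opened at import time.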
def web_cam_capture():
    if not web_cam.isOpened():
        print('Webcam is not open')
        return  # bail out instead of exiting, so the background listener keeps running
    path = 'webcam_capture.jpg'
    ret, frame = web_cam.read()
    if not ret:
        print('Failed to read a frame from the webcam')
        return
    cv2.imwrite(path, frame)
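
# Return clipboard text, or None when the clipboard holds no text.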
def get_clipboard_text():
    clipboard_content = pyperclip.paste()
    if isinstance(clipboard_content, str):
        return clipboard_content
    else:
        print('No text found in clipboard')
        return None
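
# Describe the captured image with Gemini so the chat model receives text-only context.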
def vision_prompt(prompt, photo_path):
    img = Image.open(photo_path)
    prompt = (
        'You are the vision analysis AI that provides semantic meaning from images to provide context '
        'to send to another AI that will create a response to the user. Do not respond as the AI assistant '
        'to the user. Instead, take the user prompt input and try to extract all meaning from the photo '
        'relevant to the user prompt. Then generate as much objective data about the image as possible for the AI '
        f'assistant who will respond to the user. \nUSER PROMPT: {prompt}'
    )
    response = model.generate_content([prompt, img])
    return response.text
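
# Text-to-speech: synthesize the reply with Polly, then play the resulting MP3.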
def speak(text):
    output_filename = 'speech.mp3'
    synthesize_speech(text, output_filename)
    play_audio(output_filename)
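
# Transcribe a WAV file to text with the local whisper model.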
def wav_to_text(audio_path):
    segments, _ = whisper_model.transcribe(audio_path)
    text = ''.join(segment.text for segment in segments)
    return text
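
# Background-listener callback: save the audio, transcribe it, strip the wake word,
# route the request, generate a reply, and speak it.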
def callback(recognizer, audio):
    prompt_audio_path = 'prompt.wav'
    with open(prompt_audio_path, 'wb') as f:
        f.write(audio.get_wav_data())
    prompt_text = wav_to_text(prompt_audio_path)
    clean_prompt = extract_prompt(prompt_text, wake_word)
    if clean_prompt:
        print(f'USER: {clean_prompt}')
        call = function_call(clean_prompt)
        if 'take screenshot' in call:
            print('Taking Screenshot')
            take_screenshot()
            visual_context = vision_prompt(prompt=clean_prompt, photo_path='screenshot.jpg')
        elif 'capture webcam' in call:
            print('Capturing webcam')
            web_cam_capture()
            visual_context = vision_prompt(prompt=clean_prompt, photo_path='webcam_capture.jpg')
        elif 'extract clipboard' in call:
            print('Extracting clipboard text')
            paste = get_clipboard_text()
            clean_prompt = f'{clean_prompt}\n\nCLIPBOARD CONTENT: {paste}'
            visual_context = None
        else:
            visual_context = None
        response = groq_prompt(prompt=clean_prompt, img_context=visual_context)
        print(response)
        speak(response)
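
# Calibrate for ambient noise, then hand the mic to a background listener; the main thread just idles.
# Note: adjust_for_ambient_noise runs inside the with-block, but listen_in_background must be
# called outside it, since it manages the microphone context itself.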
def start_listening():
    with source as s:
        r.adjust_for_ambient_noise(s, duration=2)
    print('\nSay', wake_word, 'followed by your prompt.\n')
    r.listen_in_background(source, callback)
    while True:
        time.sleep(0.5)
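
# Return everything spoken after the wake word, or None if the wake word wasn't heard.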
def extract_prompt(transcribed_text, wake_word):
    pattern = rf'\b{re.escape(wake_word)}[\s,.?!]*([A-Za-z0-9].*)'
    match = re.search(pattern, transcribed_text, re.IGNORECASE)
    if match:
        prompt = match.group(1).strip()
        return prompt
    else:
        return None

if __name__ == '__main__':
    start_listening()