-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbackend.py
More file actions
117 lines (98 loc) · 4.18 KB
/
backend.py
File metadata and controls
117 lines (98 loc) · 4.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
import base64
import io
import re

import requests
from flask import Flask, request, jsonify
from flask_cors import CORS
from PIL import Image
# WSGI application instance for the screenshot-to-test-instructions backend.
app = Flask(__name__)
# Enable CORS so a browser frontend served from another origin can call the API.
CORS(app)
def encode_image_to_base64(image_file):
    """Serialize an image to a base64-encoded PNG string.

    Args:
        image_file: an object with a PIL-style ``save(buffer, format=...)``
            method (e.g. a PIL Image).

    Returns:
        The base64 text of the PNG bytes, or None if saving/encoding fails
        (the error is printed, not raised).
    """
    try:
        png_buffer = io.BytesIO()
        image_file.save(png_buffer, format="PNG")
        raw_png = png_buffer.getvalue()
        return base64.b64encode(raw_png).decode("utf-8")
    except Exception as exc:
        print(f"Error encoding image: {exc}")
        return None
def create_image_collage(images):
    """Stitch the uploaded images into a single horizontal strip.

    Each image is opened, converted to RGBA, and pasted left-to-right along
    the top edge of a transparent canvas whose width is the sum of all image
    widths and whose height is the tallest image's height.

    Args:
        images: iterable of file-like objects that ``PIL.Image.open`` accepts.

    Returns:
        The collage as a PIL Image, or None on any failure — including an
        empty input list, which trips the except via ``max()`` on an empty
        sequence (the error is printed, not raised).
    """
    try:
        opened = [Image.open(f).convert("RGBA") for f in images]
        strip_width = sum(img.width for img in opened)
        strip_height = max(img.height for img in opened)
        # Transparent background so shorter images don't get opaque padding.
        canvas = Image.new('RGBA', (strip_width, strip_height), (255, 255, 255, 0))
        cursor = 0
        for img in opened:
            canvas.paste(img, (cursor, 0))
            cursor += img.width
        return canvas
    except Exception as exc:
        print(f"Error creating collage: {exc}")
        return None
def call_llava_api(prompt, image, timeout=120):
    """Call the local Ollama API with the llava model to generate text.

    Args:
        prompt: the instruction text for the model.
        image: a single base64-encoded image string (the collage).
        timeout: seconds to wait for the HTTP request. The original code
            passed no timeout, so a hung Ollama server would block the
            request worker forever; the default keeps existing callers
            working while bounding the wait.

    Returns:
        The parsed JSON response dict on success, or ``{"error": <msg>}``
        on any request failure (connection error, timeout, HTTP error).
    """
    url = "http://localhost:11434/api/generate"
    headers = {"Content-Type": "application/json"}
    payload = {
        "model": "llava",
        "prompt": prompt,
        "stream": False,  # request a single complete response, not a stream
        "images": [image]  # Single collage image
    }
    try:
        # timeout= covers both connect and read; without it requests waits
        # indefinitely, which is unacceptable inside a web request handler.
        response = requests.post(url, json=payload, headers=headers, timeout=timeout)
        response.raise_for_status()  # raise HTTPError for 4xx/5xx responses
        return response.json()
    except requests.exceptions.RequestException as e:
        # Timeout and HTTPError both subclass RequestException, so the
        # existing error contract is unchanged.
        print(f"Error calling Ollama API: {e}")
        return {"error": str(e)}
def format_instructions(text):
    """Convert the model's Markdown-flavored output to simple HTML.

    Transformations, in order:
      1. ``**bold**`` spans become ``<b>bold</b>``. The original code did
         ``replace('**', '<b>').replace('**', '</b>')`` — the first replace
         consumed every ``**`` so the closing tag was never emitted; a paired
         non-greedy regex substitution fixes that.
      2. ``1. `` is prefixed with a break and bolded (numbered lists).
      3. ``- `` becomes a break plus a bullet character.
      4. Newlines become ``<br>``.

    Args:
        text: raw text from the llava response.

    Returns:
        The HTML-ish formatted string.
    """
    # Non-greedy match pairs each opening ** with the nearest closing **,
    # keeping separate bold spans independent.
    formatted_text = re.sub(r'\*\*(.+?)\*\*', r'<b>\1</b>', text)
    formatted_text = formatted_text.replace('1. ', '<br><b>1.</b> ')  # Numbered list formatting
    formatted_text = formatted_text.replace('- ', '<br>• ')  # Bullet point formatting
    formatted_text = formatted_text.replace('\n', '<br>')  # Line break formatting
    return formatted_text
@app.route('/generate-instructions', methods=['POST'])
def generate_instructions():
    """POST endpoint: turn uploaded screenshots into testing instructions.

    Expects multipart form data with an optional ``context`` text field and
    one or more ``screenshots`` files. Builds a collage, base64-encodes it,
    asks the llava model for test instructions, and returns them as
    HTML-formatted text in JSON. Each failure stage returns a distinct
    error payload (400 for missing input, 500 otherwise).
    """
    context = request.form.get('context', '')
    images = request.files.getlist('screenshots')

    # Guard clauses: bail out at the first stage that fails.
    if not images:
        return jsonify({"error": "Screenshots are required"}), 400

    collage = create_image_collage(images)
    if not collage:
        return jsonify({"error": "Failed to create a collage from the uploaded images"}), 500

    encoded_collage = encode_image_to_base64(collage)
    if not encoded_collage:
        return jsonify({"error": "Failed to encode the collage image"}), 500

    prompt = f"Generate detailed testing instructions for the given collage of screenshots the following context: {context}. The test case should include a description, pre-conditions, testing steps, and expected results for all images in the collage."

    llava_response = call_llava_api(prompt, encoded_collage)
    if 'error' in llava_response:
        return jsonify({"error": "Failed to generate instructions from Llava API", "details": llava_response.get("error")}), 500
    if 'response' not in llava_response:
        return jsonify({"error": "No response generated by Llava API"}), 500

    # Success: return the model output converted to display-ready HTML.
    return jsonify({"response": format_instructions(llava_response['response'])})
if __name__ == '__main__':
    # Development entry point only; Flask's debug server is not for production.
    app.run(debug=True)