Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
  • Loading branch information
TamarZanzouri committed Jul 18, 2024
2 parents 7fb4f11 + 8eb14ae commit 2b8eff0
Show file tree
Hide file tree
Showing 216 changed files with 4,819 additions and 2,081 deletions.
1 change: 1 addition & 0 deletions .github/workflows/app-test-build-deploy.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -207,6 +207,7 @@ jobs:
needs: [determine-build-type]
if: needs.determine-build-type.outputs.variants != '[]'
strategy:
fail-fast: false
matrix:
os: ['windows-2022', 'ubuntu-22.04', 'macos-latest']
variant: ${{fromJSON(needs.determine-build-type.outputs.variants)}}
Expand Down
4 changes: 2 additions & 2 deletions abr-testing/abr_testing/data_collection/abr_google_drive.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,13 +76,13 @@ def create_data_dictionary(
start_time = datetime.strptime(
file_results.get("startedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
)
adjusted_start_time = start_time - timedelta(hours=5)
adjusted_start_time = start_time - timedelta(hours=4)
start_date = str(adjusted_start_time.date())
start_time_str = str(adjusted_start_time).split("+")[0]
complete_time = datetime.strptime(
file_results.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
)
adjusted_complete_time = complete_time - timedelta(hours=5)
adjusted_complete_time = complete_time - timedelta(hours=4)
complete_time_str = str(adjusted_complete_time).split("+")[0]
run_time = complete_time - start_time
run_time_min = run_time.total_seconds() / 60
Expand Down
126 changes: 121 additions & 5 deletions abr-testing/abr_testing/data_collection/abr_robot_error.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,105 @@
"""Create ticket for robot with error."""
from typing import List, Tuple, Any, Dict
from typing import List, Tuple, Any, Dict, Optional
from abr_testing.data_collection import read_robot_logs, abr_google_drive, get_run_logs
import requests
import argparse
from abr_testing.automation import jira_tool, google_sheets_tool, google_drive_tool
import shutil
import os
import subprocess
from datetime import datetime, timedelta
import sys
import json
import re
import pandas as pd
from statistics import mean, StatisticsError


def compare_current_trh_to_average(
    robot: str,
    start_time: Any,
    end_time: Optional[Any],
    protocol_name: str,
    storage_directory: str,
) -> str:
    """Compare the errored run's ambient temp/RH to the robot's recent history.

    Reads the "ABR Ambient Conditions" and "ABR-run-data" Google Sheets
    (credentials.json expected in storage_directory), averages the ambient
    readings for `robot` between `start_time` and `end_time` (unbounded above
    when end_time is None), then averages error-free runs of the same robot
    (and, when given, the same `protocol_name`) from the previous 3 weeks as a
    historical baseline. Returns the formatted comparison message (also
    printed) for inclusion in a ticket.
    """
    # Connect to ABR ambient conditions sheet
    credentials_path = os.path.join(storage_directory, "credentials.json")
    temprh_data_sheet = google_sheets_tool.google_sheet(
        credentials_path, "ABR Ambient Conditions", 0
    )
    headers = temprh_data_sheet.get_row(1)
    all_trh_data = temprh_data_sheet.get_all_data(expected_headers=headers)
    # Connect to ABR-run-data sheet
    abr_data = google_sheets_tool.google_sheet(credentials_path, "ABR-run-data", 0)
    headers = abr_data.get_row(1)
    all_run_data = abr_data.get_all_data(expected_headers=headers)
    # Find average conditions of errored time period
    df_all_trh = pd.DataFrame(all_trh_data)
    # Convert timestamps to datetime objects; sheet rows use mixed formats.
    df_all_trh["Timestamp"] = pd.to_datetime(
        df_all_trh["Timestamp"], format="mixed", utc=True
    ).dt.tz_localize(None)
    # Ensure comparison bounds are timezone-naive to match the Timestamp column.
    start_time = start_time.replace(tzinfo=None)
    trh_filter = (df_all_trh["Robot"] == robot) & (
        df_all_trh["Timestamp"] >= start_time
    )
    if end_time is not None:
        # Bound the errored window above; previously end_time was ignored,
        # which averaged every reading after start_time.
        end_time = end_time.replace(tzinfo=None)
        trh_filter = trh_filter & (df_all_trh["Timestamp"] <= end_time)
    relevant_temp_rhs = df_all_trh[trh_filter]
    try:
        avg_temp = round(mean(relevant_temp_rhs["Temp (oC)"]), 2)
        avg_rh = round(mean(relevant_temp_rhs["Relative Humidity (%)"]), 2)
    except StatisticsError:
        # statistics.mean raises only on empty data, so in practice this
        # branch means "no readings"; the single-value case is kept for
        # safety and uses positional indexing (label 0 is usually absent
        # after filtering, so plain [0] would raise KeyError).
        if len(relevant_temp_rhs["Temp (oC)"]) == 1:
            avg_temp = relevant_temp_rhs["Temp (oC)"].iloc[0]
            avg_rh = relevant_temp_rhs["Relative Humidity (%)"].iloc[0]
        else:
            avg_temp = None
            avg_rh = None
    # Get AVG t/rh of runs w/ same robot & protocol newer than 3 wks old with no errors
    weeks_ago_3 = start_time - timedelta(weeks=3)
    df_all_run_data = pd.DataFrame(all_run_data)
    df_all_run_data["Start_Time"] = pd.to_datetime(
        df_all_run_data["Start_Time"], format="mixed", utc=True
    ).dt.tz_localize(None)
    df_all_run_data["Errors"] = pd.to_numeric(df_all_run_data["Errors"])
    df_all_run_data["Average Temp (oC)"] = pd.to_numeric(
        df_all_run_data["Average Temp (oC)"]
    )
    # Baseline: error-free runs with real temp data in the trailing 3 weeks.
    common_filters = (
        (df_all_run_data["Robot"] == robot)
        & (df_all_run_data["Start_Time"] >= weeks_ago_3)
        & (df_all_run_data["Start_Time"] <= start_time)
        & (df_all_run_data["Errors"] < 1)
        & (df_all_run_data["Average Temp (oC)"] > 1)
    )

    if protocol_name == "":
        relevant_run_data = df_all_run_data[common_filters]
    else:
        relevant_run_data = df_all_run_data[
            common_filters & (df_all_run_data["Protocol_Name"] == protocol_name)
        ]
    # Calculate means of historical data; None when no matching runs exist.
    try:
        historical_avg_temp = round(
            mean(relevant_run_data["Average Temp (oC)"].astype(float)), 2
        )
        historical_avg_rh = round(
            mean(relevant_run_data["Average RH(%)"].astype(float)), 2
        )
    except StatisticsError:
        historical_avg_temp = None
        historical_avg_rh = None
    # Formats TEMP/RH message for ticket.
    temp_rh_message = (
        f"{len(relevant_run_data)} runs with temp/rh data for {robot} running {protocol_name}."
        f" AVG TEMP (deg C): {historical_avg_temp}. AVG RH (%): {historical_avg_rh}."
        f" AVG TEMP of ERROR: {avg_temp}. AVG RH of ERROR: {avg_rh}."
    )
    # Print out comparison string.
    print(temp_rh_message)
    return temp_rh_message


def compare_lpc_to_historical_data(
Expand Down Expand Up @@ -42,9 +131,9 @@ def compare_lpc_to_historical_data(
current_x = round(labware_dict["X"], 2)
current_y = round(labware_dict["Y"], 2)
current_z = round(labware_dict["Z"], 2)
avg_x = round(sum(x_float) / len(x_float), 2)
avg_y = round(sum(y_float) / len(y_float), 2)
avg_z = round(sum(z_float) / len(z_float), 2)
avg_x = round(mean(x_float), 2)
avg_y = round(mean(y_float), 2)
avg_z = round(mean(z_float), 2)

# Formats LPC message for ticket.
lpc_message = (
Expand Down Expand Up @@ -195,6 +284,15 @@ def get_robot_state(
components = ["Flex-RABR"]
components = match_error_to_component("RABR", reported_string, components)
print(components)
end_time = datetime.now()
print(end_time)
start_time = end_time - timedelta(hours=2)
print(start_time)
# Get current temp/rh compared to historical data
temp_rh_string = compare_current_trh_to_average(
parent, start_time, end_time, "", storage_directory
)
description["Robot Temp and RH Comparison"] = temp_rh_string
whole_description_str = (
"{"
+ "\n".join("{!r}: {!r},".format(k, v) for k, v in description.items())
Expand Down Expand Up @@ -242,6 +340,23 @@ def get_run_error_info_from_robot(
description["protocol_name"] = results["protocol"]["metadata"].get(
"protocolName", ""
)
# Get start and end time of run
start_time = datetime.strptime(
results.get("startedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
)
adjusted_start_time = start_time - timedelta(hours=4)
complete_time = datetime.strptime(
results.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
)
adjusted_complete_time = complete_time - timedelta(hours=4)
# Get average temp and rh of robot and protocol the error occurred on.
temp_rh_comparison = compare_current_trh_to_average(
parent,
adjusted_start_time,
adjusted_complete_time,
description["protocol_name"],
storage_directory,
)
# Get LPC coordinates of labware of failure
lpc_dict = results["labwareOffsets"]
labware_dict = results["labware"]
Expand Down Expand Up @@ -280,6 +395,7 @@ def get_run_error_info_from_robot(
if len(lpc_message) < 1:
lpc_message = "No LPC coordinates found in relation to error."
description["LPC Comparison"] = lpc_message
description["Robot Temp and RH Comparison"] = temp_rh_comparison
all_modules = abr_google_drive.get_modules(results)
whole_description = {**description, **all_modules}
whole_description_str = (
Expand Down Expand Up @@ -352,6 +468,7 @@ def get_run_error_info_from_robot(
email = args.email[0]
board_id = args.board_id[0]
reporter_id = args.reporter_id[0]
file_paths = read_robot_logs.get_logs(storage_directory, ip)
ticket = jira_tool.JiraTicket(url, api_token, email)
ticket.issues_on_board(board_id)
users_file_path = ticket.get_jira_users(storage_directory)
Expand Down Expand Up @@ -384,7 +501,6 @@ def get_run_error_info_from_robot(
saved_file_path_calibration, calibration = read_robot_logs.get_calibration_offsets(
ip, storage_directory
)
file_paths = read_robot_logs.get_logs(storage_directory, ip)

print(f"Making ticket for {summary}.")
# TODO: make argument or see if I can get rid of with using board_id.
Expand Down
21 changes: 15 additions & 6 deletions abr-testing/abr_testing/data_collection/read_robot_logs.py
Original file line number Diff line number Diff line change
Expand Up @@ -569,23 +569,32 @@ def get_calibration_offsets(

def get_logs(storage_directory: str, ip: str) -> List[str]:
"""Get Robot logs."""
log_types = ["api.log", "server.log", "serial.log", "touchscreen.log"]
log_types: List[Dict[str, Any]] = [
{"log type": "api.log", "records": 1000},
{"log type": "server.log", "records": 10000},
{"log type": "serial.log", "records": 10000},
{"log type": "touchscreen.log", "records": 1000},
]
all_paths = []
for log_type in log_types:
try:
log_type_name = log_type["log type"]
print(log_type_name)
log_records = int(log_type["records"])
print(log_records)
response = requests.get(
f"http://{ip}:31950/logs/{log_type}",
headers={"log_identifier": log_type},
params={"records": 5000},
f"http://{ip}:31950/logs/{log_type_name}",
headers={"log_identifier": log_type_name},
params={"records": log_records},
)
response.raise_for_status()
log_data = response.text
log_name = ip + "_" + log_type.split(".")[0] + ".log"
log_name = ip + "_" + log_type_name.split(".")[0] + ".log"
file_path = os.path.join(storage_directory, log_name)
with open(file_path, mode="w", encoding="utf-8") as file:
file.write(log_data)
except RuntimeError:
print(f"Request exception. Did not save {log_type}")
print(f"Request exception. Did not save {log_type_name}")
continue
all_paths.append(file_path)
# Get weston.log using scp
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3535,7 +3535,7 @@
"errors": [
{
"createdAt": "TIMESTAMP",
"detail": "ValueError [line 16]: Nozzle layout configuration of style SINGLE is currently unsupported.",
"detail": "ValueError [line 16]: Nozzle layout configuration of style SINGLE is unsupported in API Versions lower than 2.20.",
"errorCode": "4000",
"errorInfo": {},
"errorType": "ExceptionInProtocolError",
Expand All @@ -3544,10 +3544,10 @@
"wrappedErrors": [
{
"createdAt": "TIMESTAMP",
"detail": "ValueError: Nozzle layout configuration of style SINGLE is currently unsupported.",
"detail": "ValueError: Nozzle layout configuration of style SINGLE is unsupported in API Versions lower than 2.20.",
"errorCode": "4000",
"errorInfo": {
"args": "('Nozzle layout configuration of style SINGLE is currently unsupported.',)",
"args": "('Nozzle layout configuration of style SINGLE is unsupported in API Versions lower than 2.20.',)",
"class": "ValueError",
"traceback": " File \"/usr/local/lib/python3.10/site-packages/opentrons/protocols/execution/execute_python.py\", line N, in exec_run\n exec(\"run(__context)\", new_globs)\n\n File \"<string>\", line N, in <module>\n\n File \"Flex_S_v2_16_P1000_96_TC_PartialTipPickupSingle.py\", line N, in run\n\n File \"/usr/local/lib/python3.10/site-packages/opentrons/protocols/api_support/util.py\", line N, in _check_version_wrapper\n return decorated_obj(*args, **kwargs)\n\n File \"/usr/local/lib/python3.10/site-packages/opentrons/protocol_api/instrument_context.py\", line N, in configure_nozzle_layout\n raise ValueError(\n"
},
Expand Down
15 changes: 11 additions & 4 deletions api-client/src/dataFiles/uploadCsvFile.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,18 @@ export function uploadCsvFile(
config: HostConfig,
data: FileData
): ResponsePromise<UploadedCsvFileResponse> {
return request<UploadedCsvFileResponse>(
let formData

if (typeof data !== 'string') {
formData = new FormData()
formData.append('file', data)
} else {
formData = data
}
return request<UploadedCsvFileResponse, FormData | string>(
POST,
'/dataFiles',
null,
config,
data
formData,
config
)
}
13 changes: 11 additions & 2 deletions api-client/src/protocols/createProtocol.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,18 @@ import { POST, request } from '../request'
import type { ResponsePromise } from '../request'
import type { HostConfig } from '../types'
import type { Protocol } from './types'
import type { RunTimeParameterCreateData } from '../runs'
import type {
RunTimeParameterValuesCreateData,
RunTimeParameterFilesCreateData,
} from '../runs'

export function createProtocol(
config: HostConfig,
files: File[],
protocolKey?: string,
protocolKind?: string,
runTimeParameterValues?: RunTimeParameterCreateData
runTimeParameterValues?: RunTimeParameterValuesCreateData,
runTimeParameterFiles?: RunTimeParameterFilesCreateData
): ResponsePromise<Protocol> {
const formData = new FormData()
files.forEach(file => {
Expand All @@ -22,6 +26,11 @@ export function createProtocol(
'runTimeParameterValues',
JSON.stringify(runTimeParameterValues)
)
if (runTimeParameterFiles != null)
formData.append(
'runTimeParameterFiles',
JSON.stringify(runTimeParameterFiles)
)

return request<Protocol, FormData>(POST, '/protocols', formData, config)
}
12 changes: 9 additions & 3 deletions api-client/src/protocols/createProtocolAnalysis.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,21 +3,27 @@ import { POST, request } from '../request'
import type { ProtocolAnalysisSummary } from '@opentrons/shared-data'
import type { ResponsePromise } from '../request'
import type { HostConfig } from '../types'
import type { RunTimeParameterCreateData } from '../runs'
import type {
RunTimeParameterFilesCreateData,
RunTimeParameterValuesCreateData,
} from '../runs'

interface CreateProtocolAnalysisData {
runTimeParameterValues: RunTimeParameterCreateData
runTimeParameterValues: RunTimeParameterValuesCreateData
runTimeParameterFiles: RunTimeParameterFilesCreateData
forceReAnalyze: boolean
}

export function createProtocolAnalysis(
config: HostConfig,
protocolKey: string,
runTimeParameterValues?: RunTimeParameterCreateData,
runTimeParameterValues?: RunTimeParameterValuesCreateData,
runTimeParameterFiles?: RunTimeParameterFilesCreateData,
forceReAnalyze?: boolean
): ResponsePromise<ProtocolAnalysisSummary[]> {
const data = {
runTimeParameterValues: runTimeParameterValues ?? {},
runTimeParameterFiles: runTimeParameterFiles ?? {},
forceReAnalyze: forceReAnalyze ?? false,
}
const response = request<
Expand Down
8 changes: 2 additions & 6 deletions api-client/src/protocols/getCsvFiles.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
import { v4 as uuidv4 } from 'uuid'

// import { GET, request } from '../request'

// import type { ResponsePromise } from '../request'
Expand All @@ -25,18 +23,16 @@ export function getCsvFiles(
config: HostConfig,
protocolId: string
): Promise<{ data: UploadedCsvFilesResponse }> {
const fileIdOne = uuidv4()
const fileIdTwo = uuidv4()
const stub = {
data: {
files: [
{
id: fileIdOne,
id: '1',
createdAt: '2024-06-07T19:19:56.268029+00:00',
name: 'rtp_mock_file1.csv',
},
{
id: fileIdTwo,
id: '2',
createdAt: '2024-06-17T19:19:56.268029+00:00',
name: 'rtp_mock_file2.csv',
},
Expand Down
Loading

0 comments on commit 2b8eff0

Please sign in to comment.