Merge branch 'edge' into AUTH-866-add-transfer-flow-builder-2
sanni-t committed Dec 18, 2024
2 parents a03302b + 9f0bc7d commit 40dd2e9
Showing 164 changed files with 10,278 additions and 2,388 deletions.
56 changes: 54 additions & 2 deletions .github/workflows/api-test-lint-deploy.yaml
@@ -73,8 +73,6 @@ jobs:
strategy:
matrix:
os: ['windows-2022', 'ubuntu-22.04', 'macos-latest']
# TODO(mc, 2022-02-24): expand this matrix to 3.8 and 3.9,
# preferably in a nightly cronjob on edge or something
python: ['3.10']
with-ot-hardware: ['true', 'false']
exclude:
Expand Down Expand Up @@ -128,6 +126,60 @@ jobs:
files: ./api/coverage.xml
flags: api

test-package:
name: 'installed package tests on ${{ matrix.os }}'
timeout-minutes: 5
strategy:
matrix:
os: ['ubuntu-22.04', 'macos-latest', 'windows-2022']
runs-on: '${{ matrix.os }}'
steps:
- uses: 'actions/checkout@v4'
- name: 'Fix actions/checkout odd handling of tags'
if: startsWith(github.ref, 'refs/tags')
run: |
git fetch -f origin ${{ github.ref }}:${{ github.ref }}
git checkout ${{ github.ref }}
- uses: 'actions/setup-python@v4'
with:
python-version: '3.10'
- name: Set up package-testing
id: setup
if: ${{ matrix.os != 'windows-2022' }}
working-directory: package-testing
shell: bash
run: make setup
- name: Set up package-testing (Windows)
id: setup-windows
if: ${{ matrix.os == 'windows-2022' }}
working-directory: package-testing
shell: pwsh
run: make setup-windows
- name: Run the tests
if: ${{ matrix.os != 'windows-2022' }}
shell: bash
id: test
working-directory: package-testing
run: make test
- name: Run the tests (Windows)
if: ${{ matrix.os == 'windows-2022' }}
shell: pwsh
id: test-windows
working-directory: package-testing
run: make test-windows
- name: Save the test results
if: ${{ always() && (steps.setup.outcome == 'success' || steps.setup-windows.outcome == 'success') }}
id: results
uses: actions/upload-artifact@v4
with:
name: package-test-results-${{ matrix.os }}
path: package-testing/results
- name: Set job summary
if: ${{ always() }}
shell: bash
run: |
echo "## Opentrons Package Test Results ${{ matrix.os }}" >> $GITHUB_STEP_SUMMARY
echo "### Test Outcome: Unixy: ${{ steps.test.outcome }} Windows: ${{ steps.test-windows.outcome }}" >> $GITHUB_STEP_SUMMARY
echo "[Download the test results artifact](${{ steps.results.outputs.artifact-url }})" >> $GITHUB_STEP_SUMMARY
deploy:
name: 'deploy opentrons package'
needs: [test]
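The package-testing suite that `make setup` and `make test` drive is not part of this diff. Purely as an illustration, an installed-package smoke test of the kind such a suite might contain could look like the sketch below; the module layout, the `__version__` attribute, and the `opentrons.simulate` CLI module are assumptions, not the actual Opentrons tests.

```python
import subprocess
import sys


def test_package_is_importable() -> None:
    """The installed package should import and report a version."""
    import opentrons

    # Assumption: the distribution exposes __version__ at the top level.
    assert opentrons.__version__


def test_simulate_cli_help_runs() -> None:
    """Assumption: the package ships a runnable opentrons.simulate module."""
    result = subprocess.run(
        [sys.executable, "-m", "opentrons.simulate", "--help"],
        capture_output=True,
    )
    assert result.returncode == 0
```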
2 changes: 2 additions & 0 deletions .gitignore
@@ -163,3 +163,5 @@ opentrons-robot-app.tar.gz
mock_dir
.npm-cache/
.eslintcache

package-testing/results
31 changes: 8 additions & 23 deletions abr-testing/abr_testing/data_collection/abr_google_drive.py
@@ -34,16 +34,13 @@ def create_data_dictionary(
runs_to_save: Union[Set[str], str],
storage_directory: str,
issue_url: str,
plate: str,
accuracy: Any,
hellma_plate_standards: List[Dict[str, Any]],
) -> Tuple[List[List[Any]], List[str], List[List[Any]], List[str], List[List[Any]]]:
) -> Tuple[List[List[Any]], List[str], List[List[Any]], List[str]]:
"""Pull data from run files and format into a dictionary."""
runs_and_robots: List[Any] = []
runs_and_lpc: List[Dict[str, Any]] = []
headers: List[str] = []
headers_lpc: List[str] = []
list_of_heights: List[List[Any]] = [[], [], [], [], [], [], [], []]
hellma_plate_orientation = False # by default, the Hellma plate is not rotated.
for filename in os.listdir(storage_directory):
file_path = os.path.join(storage_directory, filename)
@@ -103,12 +100,15 @@ def create_data_dictionary(
run_time_min = run_time.total_seconds() / 60
except ValueError:
pass # Handle datetime parsing errors if necessary
# Get protocol version
version_number = read_robot_logs.get_protocol_version_number(file_results)

if run_time_min > 0:
run_row = {
"Robot": robot,
"Run_ID": run_id,
"Protocol_Name": protocol_name,
"Protocol Version": version_number,
"Software Version": software_version,
"Date": start_date,
"Start_Time": start_time_str,
@@ -130,13 +130,10 @@
plate_reader_dict = read_robot_logs.plate_reader_commands(
file_results, hellma_plate_standards, hellma_plate_orientation
)
list_of_heights = read_robot_logs.liquid_height_commands(
file_results, list_of_heights
)
notes = {"Note1": "", "Jira Link": issue_url}
liquid_height = read_robot_logs.get_liquid_waste_height(file_results)
plate_measure = {
"Plate Measured": plate,
"End Volume Accuracy (%)": accuracy,
"Liquid Waste Height (mm)": liquid_height,
"Average Temp (oC)": "",
"Average RH(%)": "",
}
@@ -173,7 +170,6 @@ def create_data_dictionary(
headers,
transposed_runs_and_lpc,
headers_lpc,
list_of_heights,
)


@@ -211,26 +207,15 @@ def run(
headers,
transposed_runs_and_lpc,
headers_lpc,
list_of_heights,
) = create_data_dictionary(
missing_runs_from_gs,
storage_directory,
"",
"",
"",
hellma_plate_standards=file_values,
file_values,
)
start_row = google_sheet.get_index_row() + 1
google_sheet.batch_update_cells(transposed_runs_and_robots, "A", start_row, "0")
# Record Liquid Heights Found
google_sheet_ldf = google_sheets_tool.google_sheet(
credentials_path, google_sheet_name, 2
)
google_sheet_ldf.get_row(1)
start_row_lhd = google_sheet_ldf.get_index_row() + 1
google_sheet_ldf.batch_update_cells(
list_of_heights, "A", start_row_lhd, "2075262446"
)

# Add LPC to google sheet
google_sheet_lpc = google_sheets_tool.google_sheet(credentials_path, "ABR-LPC", 0)
start_row_lpc = google_sheet_lpc.get_index_row() + 1
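Both sheet writers in this file follow the same append idiom: open the sheet, find the first empty row with `get_index_row()`, then batch-write a block of cells starting there. A standalone sketch of that pattern, using the helper names as they appear above (the import path, credentials file, and row data are placeholders):

```python
from abr_testing.automation import google_sheets_tool  # module path assumed

credentials_path = "credentials.json"                 # placeholder
rows = [["DVT1ABR1", "run-123", "Example Protocol"]]  # placeholder row data

# Open the sheet, find the first empty row, then batch-write starting there.
sheet = google_sheets_tool.google_sheet(credentials_path, "ABR-run-data", 0)
start_row = sheet.get_index_row() + 1
sheet.batch_update_cells(rows, "A", start_row, "0")
```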
87 changes: 64 additions & 23 deletions abr-testing/abr_testing/data_collection/abr_robot_error.py
@@ -11,11 +11,35 @@
import sys
import json
import re
from pathlib import Path
import pandas as pd
from statistics import mean, StatisticsError
from abr_testing.tools import plate_reader


def retrieve_protocol_file(
protocol_id: str,
robot_ip: str,
storage: str,
) -> Path | str:
"""Find and copy protocol file on robot with error."""
protocol_dir = f"/var/lib/opentrons-robot-server/7.1/protocols/{protocol_id}"

print(f"FILE TO FIND: {protocol_dir}/{protocol_id}")
# Copy the protocol file found on the robot onto the host computer
save_dir = Path(f"{storage}/protocol_errors")
command = ["scp", "-r", f"root@{robot_ip}:{protocol_dir}", save_dir]
try:
# If the file is found and copied, return the path to it
subprocess.run(command, check=True) # type: ignore
print("File transfer successful!")
return save_dir
except subprocess.CalledProcessError as e:
print(f"Error during file transfer: {e}")
# Return empty string if file can't be copied
return ""

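Because `retrieve_protocol_file` signals failure by returning an empty string rather than raising, callers need to truthiness-check its result. A minimal usage sketch, with a made-up protocol ID and robot IP:

```python
protocol_path = retrieve_protocol_file(
    protocol_id="2fc36cca-0000-0000-0000-000000000000",  # hypothetical
    robot_ip="10.14.19.51",                              # hypothetical
    storage="/home/user/abr-data",
)
if protocol_path:
    print(f"Protocol files copied to {protocol_path}")
else:
    print("Protocol not copied; the ticket will record that it was not found.")
```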

def compare_current_trh_to_average(
robot: str,
start_time: Any,
@@ -38,9 +62,13 @@ def compare_current_trh_to_average(
# Find average conditions of errored time period
df_all_trh = pd.DataFrame(all_trh_data)
# Convert timestamps to datetime objects
df_all_trh["Timestamp"] = pd.to_datetime(
df_all_trh["Timestamp"], format="mixed", utc=True
).dt.tz_localize(None)
print(f'TIMESTAMP: {df_all_trh["Timestamp"]}')
try:
df_all_trh["Timestamp"] = pd.to_datetime(
df_all_trh["Timestamp"], format="mixed", utc=True
).dt.tz_localize(None)
except Exception:
print(f'The following timestamp is invalid: {df_all_trh["Timestamp"]}')
# Ensure start_time is timezone-naive
start_time = start_time.replace(tzinfo=None)
relevant_temp_rhs = df_all_trh[
@@ -245,20 +273,24 @@ def get_user_id(user_file_path: str, assignee_name: str) -> str:
return assignee_id


def get_error_runs_from_robot(ip: str) -> List[str]:
def get_error_runs_from_robot(ip: str) -> Tuple[List[str], List[str]]:
"""Get runs that have errors from robot."""
error_run_ids = []
protocol_ids = []
response = requests.get(
f"http://{ip}:31950/runs", headers={"opentrons-version": "3"}
)
run_data = response.json()
run_list = run_data.get("data", [])
for run in run_list:
run_id = run["id"]
protocol_id = run["protocolId"]
num_of_errors = len(run["errors"])
if not run["current"] and num_of_errors > 0:
error_run_ids.append(run_id)
return error_run_ids
# The protocol ID identifies the folder on the robot that holds the protocol file
protocol_ids.append(protocol_id)
return (error_run_ids, protocol_ids)

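`get_error_runs_from_robot` now returns two parallel lists (one entry of each per errored run), so existing callers must unpack a tuple. For example, with a made-up IP:

```python
error_run_ids, protocol_ids = get_error_runs_from_robot("10.14.19.51")
if error_run_ids:
    most_recent_run = error_run_ids[-1]  # newest errored run
    its_protocol_id = protocol_ids[-1]   # folder of that run's protocol on the robot
```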

def get_robot_state(
Expand Down Expand Up @@ -335,7 +367,7 @@ def get_robot_state(


def get_run_error_info_from_robot(
ip: str, one_run: str, storage_directory: str
ip: str, one_run: str, storage_directory: str, protocol_found: bool
) -> Tuple[str, str, str, List[str], List[str], str, str]:
"""Get error information from robot to fill out ticket."""
description = dict()
@@ -369,6 +401,9 @@
description["protocol_name"] = results["protocol"]["metadata"].get(
"protocolName", ""
)

# Record whether the protocol was successfully retrieved from the robot
description["protocol_found_on_robot"] = protocol_found
# Get start and end time of run
start_time = datetime.strptime(
results.get("startedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
@@ -511,12 +546,21 @@ def get_run_error_info_from_robot(
users_file_path = ticket.get_jira_users(storage_directory)
assignee_id = get_user_id(users_file_path, assignee)
run_log_file_path = ""
protocol_found = False
try:
error_runs = get_error_runs_from_robot(ip)
error_runs, protocol_ids = get_error_runs_from_robot(ip)
except requests.exceptions.InvalidURL:
print("Invalid IP address.")
sys.exit()
if len(run_or_other) < 1:
# Retrieve the most recently run protocol file
protocol_files_path = retrieve_protocol_file(
protocol_ids[-1], ip, storage_directory
)
# Set protocol_found to True if the Python protocol was successfully copied over
if protocol_files_path:
protocol_found = True

one_run = error_runs[-1] # Most recent run with error.
(
summary,
@@ -526,7 +570,9 @@ def get_run_error_info_from_robot(
labels,
whole_description_str,
run_log_file_path,
) = get_run_error_info_from_robot(ip, one_run, storage_directory)
) = get_run_error_info_from_robot(
ip, one_run, storage_directory, protocol_found
)
else:
(
summary,
@@ -566,8 +612,15 @@ def get_run_error_info_from_robot(
# OPEN TICKET
issue_url = ticket.open_issue(issue_key)
# MOVE FILES TO ERROR FOLDER.
print(protocol_files_path)
error_files = [saved_file_path_calibration, run_log_file_path] + file_paths
error_folder_path = os.path.join(storage_directory, issue_key)

# Move protocol file(s) to error folder
if protocol_files_path:
for file in os.listdir(protocol_files_path):
error_files.append(os.path.join(protocol_files_path, file))

error_folder_path = os.path.join(storage_directory, "issue_key")
os.makedirs(error_folder_path, exist_ok=True)
for source_file in error_files:
try:
Expand All @@ -577,7 +630,7 @@ def get_run_error_info_from_robot(
shutil.move(source_file, destination_file)
except shutil.Error:
continue
# POST FILES TO TICKET
# POST ALL FILES TO TICKET
list_of_files = os.listdir(error_folder_path)
for file in list_of_files:
file_to_attach = os.path.join(error_folder_path, file)
@@ -614,28 +667,16 @@ def get_run_error_info_from_robot(
headers,
runs_and_lpc,
headers_lpc,
list_of_heights,
) = abr_google_drive.create_data_dictionary(
run_id,
error_folder_path,
issue_url,
"",
"",
hellma_plate_standards=file_values,
file_values,
)

start_row = google_sheet.get_index_row() + 1
google_sheet.batch_update_cells(runs_and_robots, "A", start_row, "0")
print("Wrote run to ABR-run-data")
# Record Liquid Heights Found
google_sheet_ldf = google_sheets_tool.google_sheet(
credentials_path, google_sheet_name, 4
)
start_row_lhd = google_sheet_ldf.get_index_row() + 1
google_sheet_ldf.batch_update_cells(
list_of_heights, "A", start_row_lhd, "1795535088"
)
print("wrote liquid heights found.")
# Add LPC to google sheet
google_sheet_lpc = google_sheets_tool.google_sheet(
credentials_path, "ABR-LPC", 0
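The timestamp handling added to `compare_current_trh_to_average` uses a pandas idiom worth seeing in isolation: `format="mixed"` parses heterogeneous timestamp strings element by element, `utc=True` normalizes them to a single zone, and `tz_localize(None)` then strips the zone so the values compare cleanly against timezone-naive datetimes. A minimal standalone sketch:

```python
import pandas as pd

# Heterogeneous timestamp strings, as a sensor log might contain.
raw = pd.Series(["2024-12-18T09:30:00Z", "2024-12-18 10:15:00+00:00"])

# Parse each element independently, normalize to UTC, then drop the zone
# so comparisons with naive datetimes do not raise.
clean = pd.to_datetime(raw, format="mixed", utc=True).dt.tz_localize(None)
print(clean)
```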
(Diff truncated: the remaining 160 of the 164 changed files are not shown.)
