diff --git a/.github/workflows/run-tests-and-publish-bundle.yaml b/.github/workflows/run-tests-and-publish-bundle.yaml
index 46db62a2..2dbe81ad 100644
--- a/.github/workflows/run-tests-and-publish-bundle.yaml
+++ b/.github/workflows/run-tests-and-publish-bundle.yaml
@@ -28,13 +28,16 @@ jobs:
id: bundle-test-path
run: python scripts/get_bundle_test_path.py ${{ inputs.release }}
- run-tests:
- name: Run tests
- needs: [get-release-inputs]
- uses: ./.github/workflows/full-bundle-tests.yaml
- with:
- bundle-test-path: ${{ needs.get-release-inputs.outputs.bundle_test_path }}
- bundle-source: --file ${{ needs.get-release-inputs.outputs.bundle_path }}/bundle.yaml
+
+  # Temporarily disabled: the tests currently fail because of
+ # https://github.com/canonical/oidc-gatekeeper-operator/issues/112
+ #run-tests:
+ #name: Run tests
+ #needs: [get-release-inputs]
+ #uses: ./.github/workflows/full-bundle-tests.yaml
+ #with:
+ #bundle-test-path: ${{ needs.get-release-inputs.outputs.bundle_test_path }}
+ #bundle-source: --file ${{ needs.get-release-inputs.outputs.bundle_path }}/bundle.yaml
publish-bundle-for-releases-affected:
name: Publish bundle
diff --git a/scripts/get_bundle_test_path.py b/scripts/get_bundle_test_path.py
index df3ed572..15e292fe 100644
--- a/scripts/get_bundle_test_path.py
+++ b/scripts/get_bundle_test_path.py
@@ -1,6 +1,6 @@
# Get bundle test path for specific release
-import sys
import os
+import sys
# For new releases, add a release/tests mapping to this dictionary
RELEASE_TESTS = {
@@ -10,6 +10,9 @@
"1.7/beta": "./tests-bundle/1.7/",
"1.7/edge": "./tests-bundle/1.7/",
"1.7/stable": "./tests-bundle/1.7/",
+ "1.8/beta": "./tests-bundle/1.8/",
+ "1.8/edge": "./tests-bundle/1.8/",
+ "1.8/stable": "./tests-bundle/1.8/",
"latest/beta": "./tests-bundle/1.7/",
"latest/edge": "./tests-bundle/1.7/",
}
diff --git a/scripts/get_releases_affected.py b/scripts/get_releases_affected.py
index 75aba236..c7f3b48f 100644
--- a/scripts/get_releases_affected.py
+++ b/scripts/get_releases_affected.py
@@ -1,33 +1,37 @@
# Extract from the files changed by this PR the releases/channels affected.
-import re
-import sys
import json
import os
+import re
+import sys
+
+ACCEPTED_TRACKS = ["1.7", "1.8", "latest"]
+ACCEPTED_RISKS = ["beta", "edge", "stable"]
+
def get_releases_affected() -> None:
- releases_affected = set()
-
- for file_path in sys.argv:
- # check if string starts with the "releases"
- file_path_starts_with_releases = re.search("^releases", file_path)
-
- if file_path_starts_with_releases :
- directories = file_path.split('/')
- track = directories[1]
- risk = directories[2]
- accepted_tracks = ["1.4","1.6","1.7","latest"]
- accepted_risks = ["beta","edge","stable"]
-
- if(track in accepted_tracks and risk in accepted_risks):
- release = f"{track}/{risk}"
- releases_affected.add(release)
- else:
- raise Exception(f"File {file_path} was changed in 'releases' directory but it's not part of a known release/channel.")
-
- releases_affected_json = json.dumps(list(releases_affected))
- print(f"The following releases have been affected by this PR: {releases_affected_json}")
- with open(os.environ['GITHUB_OUTPUT'], 'a') as fh:
- print(f'releases_affected_json={releases_affected_json}', file=fh)
+ releases_affected = set()
+
+ for file_path in sys.argv:
+        # only consider files under the "releases" directory
+ file_path_starts_with_releases = re.search("^releases", file_path)
+
+ if file_path_starts_with_releases:
+ directories = file_path.split('/')
+ track = directories[1]
+ risk = directories[2]
+
+ if(track in ACCEPTED_TRACKS and risk in ACCEPTED_RISKS):
+ release = f"{track}/{risk}"
+ releases_affected.add(release)
+ else:
+ raise Exception(
+ f"File {file_path} was changed in 'releases' directory but it's not part of a known release/channel.")
+
+ releases_affected_json = json.dumps(list(releases_affected))
+ print(
+ f"The following releases have been affected by this PR: {releases_affected_json}")
+ with open(os.environ['GITHUB_OUTPUT'], 'a') as fh:
+ print(f'releases_affected_json={releases_affected_json}', file=fh)
get_releases_affected()
diff --git a/tests-bundle/1.8/__init__.py b/tests-bundle/1.8/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests-bundle/1.8/advanced_notebook.py.tmpl b/tests-bundle/1.8/advanced_notebook.py.tmpl
new file mode 100644
index 00000000..934d8608
--- /dev/null
+++ b/tests-bundle/1.8/advanced_notebook.py.tmpl
@@ -0,0 +1,199 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# This file was generated from advanced.ipynb. Do not edit code in this file.
+#############################################################################
+#############################################################################
+#############################################################################
+#############################################################################
+
+# ##### Copyright 2019 The TensorFlow Authors.
+
+# In[1]:
+
+
+#@title Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# # TensorFlow 2 quickstart for experts
+
+#
+
+# This is a [Google Colaboratory](https://colab.research.google.com/notebooks/welcome.ipynb) notebook file. Python programs are run directly in the browser—a great way to learn and use TensorFlow. To follow this tutorial, run the notebook in Google Colab by clicking the button at the top of this page.
+#
+# 1. In Colab, connect to a Python runtime: At the top-right of the menu bar, select *CONNECT*.
+# 2. Run all the notebook code cells: Select *Runtime* > *Run all*.
+
+# Download and install TensorFlow 2. Import TensorFlow into your program:
+#
+# Note: Upgrade `pip` to install the TensorFlow 2 package. See the [install guide](https://www.tensorflow.org/install) for details.
+
+# Import TensorFlow into your program:
+
+# In[2]:
+
+
+import tensorflow as tf
+print("TensorFlow version:", tf.__version__)
+
+from tensorflow.keras.layers import Dense, Flatten, Conv2D
+from tensorflow.keras import Model
+
+
+# Load and prepare the [MNIST dataset](http://yann.lecun.com/exdb/mnist/).
+
+# In[3]:
+
+
+mnist = tf.keras.datasets.mnist
+
+(x_train, y_train), (x_test, y_test) = mnist.load_data()
+x_train, x_test = x_train / 255.0, x_test / 255.0
+
+# Add a channels dimension
+x_train = x_train[..., tf.newaxis].astype("float32")
+x_test = x_test[..., tf.newaxis].astype("float32")
+
+
+# Use `tf.data` to batch and shuffle the dataset:
+
+# In[4]:
+
+
+train_ds = tf.data.Dataset.from_tensor_slices(
+ (x_train, y_train)).shuffle(10000).batch(32)
+
+test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
+
+
+# Build the `tf.keras` model using the Keras [model subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models):
+
+# In[5]:
+
+
+class MyModel(Model):
+ def __init__(self):
+ super(MyModel, self).__init__()
+ self.conv1 = Conv2D(32, 3, activation='relu')
+ self.flatten = Flatten()
+ self.d1 = Dense(128, activation='relu')
+ self.d2 = Dense(10)
+
+ def call(self, x):
+ x = self.conv1(x)
+ x = self.flatten(x)
+ x = self.d1(x)
+ return self.d2(x)
+
+# Create an instance of the model
+model = MyModel()
+
+
+# Choose an optimizer and loss function for training:
+
+# In[6]:
+
+
+loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+
+optimizer = tf.keras.optimizers.Adam()
+
+
+# Select metrics to measure the loss and the accuracy of the model. These metrics accumulate the values over epochs and then print the overall result.
+
+# In[7]:
+
+
+train_loss = tf.keras.metrics.Mean(name='train_loss')
+train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
+
+test_loss = tf.keras.metrics.Mean(name='test_loss')
+test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
+
+
+# Use `tf.GradientTape` to train the model:
+
+# In[8]:
+
+
+@tf.function
+def train_step(images, labels):
+ with tf.GradientTape() as tape:
+ # training=True is only needed if there are layers with different
+ # behavior during training versus inference (e.g. Dropout).
+ predictions = model(images, training=True)
+ loss = loss_object(labels, predictions)
+ gradients = tape.gradient(loss, model.trainable_variables)
+ optimizer.apply_gradients(zip(gradients, model.trainable_variables))
+
+ train_loss(loss)
+ train_accuracy(labels, predictions)
+
+
+# Test the model:
+
+# In[9]:
+
+
+@tf.function
+def test_step(images, labels):
+ # training=False is only needed if there are layers with different
+ # behavior during training versus inference (e.g. Dropout).
+ predictions = model(images, training=False)
+ t_loss = loss_object(labels, predictions)
+
+ test_loss(t_loss)
+ test_accuracy(labels, predictions)
+
+
+# In[10]:
+
+
+EPOCHS = 5
+
+for epoch in range(EPOCHS):
+ # Reset the metrics at the start of the next epoch
+ train_loss.reset_states()
+ train_accuracy.reset_states()
+ test_loss.reset_states()
+ test_accuracy.reset_states()
+
+ for images, labels in train_ds:
+ train_step(images, labels)
+
+ for test_images, test_labels in test_ds:
+ test_step(test_images, test_labels)
+
+ print(
+ f'Epoch {epoch + 1}, '
+ f'Loss: {train_loss.result()}, '
+ f'Accuracy: {train_accuracy.result() * 100}, '
+ f'Test Loss: {test_loss.result()}, '
+ f'Test Accuracy: {test_accuracy.result() * 100}'
+ )
+
+
+# The image classifier is now trained to ~98% accuracy on this dataset. To learn more, read the [TensorFlow tutorials](https://www.tensorflow.org/tutorials).
\ No newline at end of file
diff --git a/tests-bundle/1.8/conftest.py b/tests-bundle/1.8/conftest.py
new file mode 100644
index 00000000..592bf342
--- /dev/null
+++ b/tests-bundle/1.8/conftest.py
@@ -0,0 +1,76 @@
+import os
+import time
+from datetime import datetime
+from pathlib import Path
+
+import pytest
+from selenium import webdriver
+
+from selenium.webdriver.firefox.options import Options
+from selenium.webdriver.firefox.service import Service
+from webdriver_manager.firefox import GeckoDriverManager
+
+DEBUG = os.environ.get("DEBUG_KF", False)
+
+
+@pytest.fixture(scope='session')
+def driver(request):
+ """Set up webdriver fixture."""
+ options = Options()
+ if not DEBUG:
+ print("Running in headless mode")
+ options.add_argument('--headless')
+ options.add_argument('--disable-gpu')
+ else:
+ options.log.level = "trace"
+
+ options.add_argument('--no-sandbox')
+ options.add_argument('--disable-dev-shm-usage')
+ options.binary_location = "/snap/bin/firefox"
+
+ # must create path,
+ # see https://github.com/mozilla/geckodriver/releases/tag/v0.31.0
+ tmp_user = Path("~/tmp").expanduser()
+ os.environ["TMPDIR"] = str(tmp_user)
+ tmp_user.mkdir(parents=True, exist_ok=True)
+
+ service = Service(GeckoDriverManager().install())
+ driver = webdriver.Firefox(options=options, service=service)
+ driver.set_window_size(1920, 1080)
+ driver.maximize_window()
+ driver.implicitly_wait(10)
+
+ yield driver
+ driver.quit()
+
+
+@pytest.hookimpl(tryfirst=True, hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+ """Set up a hook to be able to check if a test has failed."""
+ # execute all other hooks to obtain the report object
+ outcome = yield
+ rep = outcome.get_result()
+
+ # set a report attribute for each phase of a call, which can
+ # be "setup", "call", "teardown"
+
+ setattr(item, "rep_" + rep.when, rep)
+
+
+@pytest.fixture(scope="function")
+def failed_check(request):
+ """Check if a test has failed and take a screenshot if it has."""
+ yield
+ if request.node.rep_setup.passed:
+ if request.node.rep_call.failed:
+ driver = request.node.funcargs['driver']
+ take_screenshot(driver, request.node.name)
+ print("executing test failed", request.node.nodeid)
+
+
+def take_screenshot(driver, node_name):
+ time.sleep(1)
+ Path("sel-screenshots").mkdir(parents=True, exist_ok=True)
+ file_name = f'sel-screenshots/{node_name}_{datetime.today().strftime("%m-%d_%H-%M")}.png'
+ print(f"Taking screenshot: {file_name}")
+ driver.save_screenshot(file_name)
diff --git a/tests-bundle/1.8/helpers.py b/tests-bundle/1.8/helpers.py
new file mode 100644
index 00000000..56567277
--- /dev/null
+++ b/tests-bundle/1.8/helpers.py
@@ -0,0 +1,14 @@
+from lightkube.resources.core_v1 import Service
+
+
+def get_ingress_url(lightkube_client, model_name):
+ gateway_svc = lightkube_client.get(
+ Service, "istio-ingressgateway-workload", namespace=model_name
+ )
+
+ public_url = f"http://{gateway_svc.status.loadBalancer.ingress[0].ip}.nip.io"
+ return public_url
+
+
+def from_minutes(minutes):
+ return minutes * 60
diff --git a/tests-bundle/1.8/requirements.txt b/tests-bundle/1.8/requirements.txt
new file mode 100644
index 00000000..aefbb87d
--- /dev/null
+++ b/tests-bundle/1.8/requirements.txt
@@ -0,0 +1,7 @@
+lightkube
+pytest
+pytest-operator
+kfp<2.0.0
+juju<3.0.0
+selenium>=4.8.3
+webdriver_manager>=3.8.5
diff --git a/tests-bundle/1.8/test_release_1-8.py b/tests-bundle/1.8/test_release_1-8.py
new file mode 100644
index 00000000..5e7d5c61
--- /dev/null
+++ b/tests-bundle/1.8/test_release_1-8.py
@@ -0,0 +1,111 @@
+import shlex
+
+import pytest
+from helpers import get_ingress_url, from_minutes
+from pytest_operator.plugin import OpsTest
+
+USERNAME = "admin"
+PASSWORD = "admin"
+
+
+@pytest.mark.abort_on_fail
+@pytest.mark.deploy
+async def test_deploy(ops_test: OpsTest, lightkube_client, deploy_cmd):
+ print(f"Deploying bundle to {ops_test.model_full_name} using cmd '{deploy_cmd}'")
+ rc, stdout, stderr = await ops_test.run(*shlex.split(deploy_cmd))
+
+ if rc != 0:
+ raise Exception(f"Deploy failed with code: {rc}, \nstdout: {stdout}, \nstderr {stderr}")
+
+ print("Waiting for bundle to be ready")
+ apps = [
+ 'admission-webhook',
+ 'argo-controller',
+ 'argo-server',
+ 'dex-auth',
+ # 'istio-ingressgateway', # this is expected to wait for OIDC
+ # 'istio-pilot', # this is expected to wait for OIDC
+ 'jupyter-controller',
+ 'jupyter-ui',
+ 'katib-controller',
+ 'katib-db',
+ 'katib-db-manager',
+ 'katib-ui',
+ 'kfp-api',
+ 'kfp-db',
+ 'kfp-persistence',
+ 'kfp-profile-controller',
+ 'kfp-schedwf',
+ 'kfp-ui',
+ 'kfp-viewer',
+ 'kfp-viz',
+ 'knative-eventing',
+ 'knative-operator',
+ 'knative-serving',
+ 'kserve-controller',
+ 'kubeflow-dashboard',
+ # due to https://github.com/canonical/kubeflow-profiles-operator/issues/117
+ # 'kubeflow-profiles',
+ 'kubeflow-roles',
+ 'kubeflow-volumes',
+ 'metacontroller-operator',
+ 'minio',
+ # 'oidc-gatekeeper', # this is expected to wait for public-url config
+ 'seldon-controller-manager',
+ # 'tensorboard-controller', # this is expected to wait for config
+ 'tensorboards-web-app',
+ 'training-operator',
+ ]
+ await ops_test.model.wait_for_idle(
+ apps=apps,
+ status="active",
+ raise_on_blocked=False,
+ raise_on_error=False,
+ timeout=from_minutes(minutes=180),
+ )
+ print("All applications are active")
+
+ url = get_ingress_url(lightkube_client, ops_test.model_name)
+
+ print("Update Dex and OIDC configs")
+ await ops_test.model.applications["dex-auth"].set_config(
+ {"public-url": url, "static-username": USERNAME, "static-password": PASSWORD}
+ )
+ await ops_test.model.applications["oidc-gatekeeper"].set_config({"public-url": url})
+
+    # these apps are expected to become active now that they are configured
+ apps.append("oidc-gatekeeper")
+ apps.append("istio-ingressgateway")
+ apps.append("istio-pilot")
+ await ops_test.model.wait_for_idle(
+ apps=apps,
+ status="active",
+ raise_on_blocked=False,
+ raise_on_error=False,
+ timeout=from_minutes(minutes=100),
+ )
+
+ if rc != 0:
+ raise Exception(f"Dispatch failed with code: {rc}, \nstdout: {stdout}, \nstderr {stderr}")
+
+ # now wait for all apps
+ await ops_test.model.wait_for_idle(
+ status="active",
+ raise_on_blocked=False,
+ raise_on_error=True,
+ timeout=from_minutes(minutes=30),
+ idle_period=from_minutes(minutes=3),
+ )
+
+
+@pytest.mark.deploy
+@pytest.mark.abort_on_fail
+async def test_profile_creation_action(ops_test: OpsTest):
+ """Test that the create-profile action works.
+
+    Also, this allows testing Selenium and skipping the welcome page in the dashboard UI.
+ """
+ action = await ops_test.model.applications["kubeflow-profiles"].units[0].run_action(
+ "create-profile", profilename=USERNAME, username=USERNAME
+ )
+ await action.wait()
diff --git a/tests-bundle/1.8/test_tutorial.py b/tests-bundle/1.8/test_tutorial.py
new file mode 100644
index 00000000..30f178c6
--- /dev/null
+++ b/tests-bundle/1.8/test_tutorial.py
@@ -0,0 +1,185 @@
+import time
+from pathlib import Path
+
+import pytest
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions
+from selenium.webdriver.support.wait import WebDriverWait
+
+from selenium.webdriver.common.action_chains import ActionChains
+from selenium.webdriver.common.keys import Keys
+
+TESTS_DIR = Path(__file__).resolve().parent
+
+
+@pytest.mark.usefixtures("failed_check")
+class TestGetStartedTutorial:
+ """Test that covers 'get started tutorial'.
+
+    This test is based on the following tutorial:
+ https://charmed-kubeflow.io/docs/get-started-with-charmed-kubeflow
+
+ It will execute on the deployed kubeflow bundle and will create a notebook.
+ Once notebook is created, it will be executed with an example code.
+ For code see advanced_notebook.py.tmpl file.
+
+ Once notebook is executed, we will check that all 5 Epochs are completed.
+
+ Prerequisites for the test:
+ - Full bundle is deployed
+ - User namespace is created (in order to skip welcome page)
+ """
+
+ @pytest.mark.selenium
+ def test_create_notebook(self, driver):
+        # relies on alphabetical test-name ordering so it runs after the deploy tests
+ driver.get("http://10.64.140.43.nip.io")
+ login_field = WebDriverWait(driver, 200).until(
+ expected_conditions.presence_of_element_located(
+ (
+ By.ID,
+ "login",
+ )
+ )
+ )
+ login_field.send_keys("admin")
+ driver.find_element(by=By.ID, value="password").send_keys("admin")
+ driver.find_element(by=By.ID, value="submit-login").click()
+ shadow_root = driver.find_element(by=By.XPATH, value="/html/body/main-page").shadow_root
+ sidepanel_menu = shadow_root.find_elements(by=By.CLASS_NAME, value="menu-item")
+ for menu_item in sidepanel_menu:
+ if menu_item.accessible_name == "Notebooks":
+ menu_item.click()
+ break
+ else:
+ raise Exception("Notebooks menu item not found")
+
+ time.sleep(3)
+ notebooks_content = shadow_root.find_element(by=By.ID, value="Content")
+ notebooks_shadow_root = notebooks_content.find_element(
+ by=By.XPATH, value="neon-animated-pages/neon-animatable[4]/iframe-container"
+ ).shadow_root
+ iframe = notebooks_shadow_root.find_element(by=By.ID, value="iframe")
+ driver.switch_to.frame(iframe)
+ print("switched to iframe")
+
+ new_notebook_button = WebDriverWait(driver, 300).until(
+ expected_conditions.presence_of_element_located(
+ (
+ By.XPATH,
+ (
+ "/html/body/app-root/app-index/app-index-default/"
+ "div/lib-title-actions-toolbar/div/div[4]/div/button"
+ ),
+ )
+ )
+ )
+ new_notebook_button.click()
+ notebook_name_input = WebDriverWait(driver, 300).until(
+ expected_conditions.presence_of_element_located(
+ (
+ By.XPATH,
+ "/html/body/app-root/app-form-new/div/div/form/app-form-name/"
+ "lib-form-section/div/lib-name-input/mat-form-field/div/div[1]/div[3]/input",
+ )
+ )
+ )
+ notebook_name_input.send_keys("test-notebook")
+ custom_notebook_menu = driver.find_element(
+ by=By.XPATH,
+ value=(
+ "/html/body/app-root/app-form-new/div/div/form/app-form-image/"
+ "lib-form-section/div/div[2]/mat-accordion/mat-expansion-panel/mat-expansion-panel-header"
+ ),
+ )
+ custom_notebook_menu.click()
+ images_drop_down_menu = driver.find_element(
+ by=By.XPATH,
+ value=(
+ "/html/body/app-root/app-form-new/div/div/form/app-form-image/"
+ "lib-form-section/div/div[2]/mat-accordion/"
+ "mat-expansion-panel/div/div/mat-form-field/div/div[1]/div[3]"
+ ),
+ )
+ images_drop_down_menu.click()
+ all_notebook_images = driver.find_elements(by=By.CLASS_NAME, value="mat-option-text")
+
+ assert all_notebook_images, "No notebook images found"
+
+ for notebook_image in all_notebook_images:
+ WebDriverWait(driver, 10).until(
+ expected_conditions.element_to_be_clickable(notebook_image)
+ )
+ if "jupyter-tensorflow-full" in notebook_image.text:
+ print(f"Notebook found: {notebook_image.text}")
+ notebook_image.click()
+ break
+ else:
+ raise Exception("jupyter-tensorflow-full image not found")
+
+ launch_button = driver.find_element(
+ by=By.XPATH, value="/html/body/app-root/app-form-new/div/div/div/div/button"
+ )
+ WebDriverWait(driver, 10).until(expected_conditions.element_to_be_clickable(launch_button))
+ launch_button.click()
+ time.sleep(3) # wait for notebook to start
+
+ app_root = driver.find_element(by=By.XPATH, value="/html/body/app-root")
+ connect_button = app_root.find_element(
+ by=By.XPATH,
+ value="app-index/app-index-default/div/div/lib-table/table/tbody/tr/td[10]/div/lib-action-button/button",
+ )
+ WebDriverWait(driver, 400).until(
+ expected_conditions.element_to_be_clickable(connect_button)
+ )
+ connect_button.click()
+
+ # notebook page
+ driver.switch_to.window(driver.window_handles[1])
+ assert "http://10.64.140.43.nip.io/notebook/admin/test-notebook/lab" in driver.current_url
+
+ time.sleep(2)
+ new_kernel = WebDriverWait(driver, 60).until(
+ expected_conditions.presence_of_element_located(
+ (
+ By.XPATH,
+ "/html/body/div[1]/div[3]/div[2]/div[1]/div[3]/div[3]/div[3]/div/div/div[2]/div[2]/div",
+ )
+ )
+ )
+ new_kernel.click()
+
+ text_field = driver.find_element(
+ by=By.XPATH,
+ value=(
+ "/html/body/div[1]/div[3]/div[2]/div[1]/div[3]/div[3]/div[3]/"
+ "div/div[3]/div[2]/div[2]/div/div[1]/textarea"
+ ),
+ )
+
+ with open(TESTS_DIR / "advanced_notebook.py.tmpl") as f:
+ text_field.send_keys(f.read())
+
+ time.sleep(2)
+
+ action = ActionChains(driver)
+ action.key_down(Keys.CONTROL).key_down(Keys.ENTER).perform()
+
+ # wait for the notebook to finish
+ for i in range(60):
+ output_field = WebDriverWait(driver, 600).until(
+ expected_conditions.presence_of_element_located(
+ (
+ By.XPATH,
+ "/html/body/div[1]/div[3]/div[2]/div[1]/div[3]/div[3]/"
+ "div[3]/div/div[5]/div[2]/div[4]/div[2]/pre",
+ )
+ )
+ )
+ print(f"Waiting for notebook to finish, current output: {output_field.text}")
+ if "Epoch 5" not in output_field.text:
+ time.sleep(15)
+ else:
+ break
+ else:
+ raise Exception("Notebook did not finish in 600 seconds")