Skip to content

Commit

Permalink
Update the release automation for 1.8 (#722)
Browse files Browse the repository at this point in the history
* Update workflow to NOT run tests when publishing

Since currently the tests are broken we will disable running them in the
workflows. We will revert once we fix
canonical/oidc-gatekeeper-operator#112

* Update scripts for including the 1.8 release

Update the scripts for gathering paths and test files for the 1.8
release.

* Add test files for 1.8

Copy over all files from 1.7 for 1.8. Although at the time of the commit
we will have disabled the tests, but we will introduce them nevertheless
to be there in the future.

* review: Put accepted tracks/risk as global var

* Remove redundant istio-pilot config-event triggering
  • Loading branch information
kimwnasptd authored Oct 4, 2023
1 parent 6e6513a commit 0bf8585
Show file tree
Hide file tree
Showing 10 changed files with 635 additions and 33 deletions.
17 changes: 10 additions & 7 deletions .github/workflows/run-tests-and-publish-bundle.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -28,13 +28,16 @@ jobs:
id: bundle-test-path
run: python scripts/get_bundle_test_path.py ${{ inputs.release }}

run-tests:
name: Run tests
needs: [get-release-inputs]
uses: ./.github/workflows/full-bundle-tests.yaml
with:
bundle-test-path: ${{ needs.get-release-inputs.outputs.bundle_test_path }}
bundle-source: --file ${{ needs.get-release-inputs.outputs.bundle_path }}/bundle.yaml

# Commenting out since currently the tests are failing because of
# https://github.com/canonical/oidc-gatekeeper-operator/issues/112
#run-tests:
#name: Run tests
#needs: [get-release-inputs]
#uses: ./.github/workflows/full-bundle-tests.yaml
#with:
#bundle-test-path: ${{ needs.get-release-inputs.outputs.bundle_test_path }}
#bundle-source: --file ${{ needs.get-release-inputs.outputs.bundle_path }}/bundle.yaml

publish-bundle-for-releases-affected:
name: Publish bundle
Expand Down
5 changes: 4 additions & 1 deletion scripts/get_bundle_test_path.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# Get bundle test path for specific release
import sys
import os
import sys

# For new releases, add a release/tests mapping to this dictionary
RELEASE_TESTS = {
Expand All @@ -10,6 +10,9 @@
"1.7/beta": "./tests-bundle/1.7/",
"1.7/edge": "./tests-bundle/1.7/",
"1.7/stable": "./tests-bundle/1.7/",
"1.8/beta": "./tests-bundle/1.8/",
"1.8/edge": "./tests-bundle/1.8/",
"1.8/stable": "./tests-bundle/1.8/",
"latest/beta": "./tests-bundle/1.7/",
"latest/edge": "./tests-bundle/1.7/",
}
Expand Down
54 changes: 29 additions & 25 deletions scripts/get_releases_affected.py
Original file line number Diff line number Diff line change
@@ -1,33 +1,37 @@
# Extract from the files changed by this PR the releases/channels affected.
import re
import sys
import json
import os
import re
import sys

ACCEPTED_TRACKS = ["1.7", "1.8", "latest"]
ACCEPTED_RISKS = ["beta", "edge", "stable"]


def get_releases_affected() -> None:
releases_affected = set()

for file_path in sys.argv:
# check if string starts with the "releases"
file_path_starts_with_releases = re.search("^releases", file_path)

if file_path_starts_with_releases :
directories = file_path.split('/')
track = directories[1]
risk = directories[2]
accepted_tracks = ["1.4","1.6","1.7","latest"]
accepted_risks = ["beta","edge","stable"]

if(track in accepted_tracks and risk in accepted_risks):
release = f"{track}/{risk}"
releases_affected.add(release)
else:
raise Exception(f"File {file_path} was changed in 'releases' directory but it's not part of a known release/channel.")

releases_affected_json = json.dumps(list(releases_affected))
print(f"The following releases have been affected by this PR: {releases_affected_json}")
with open(os.environ['GITHUB_OUTPUT'], 'a') as fh:
print(f'releases_affected_json={releases_affected_json}', file=fh)
releases_affected = set()

for file_path in sys.argv:
# check if string starts with the "releases"
file_path_starts_with_releases = re.search("^releases", file_path)

if file_path_starts_with_releases:
directories = file_path.split('/')
track = directories[1]
risk = directories[2]

if(track in ACCEPTED_TRACKS and risk in ACCEPTED_RISKS):
release = f"{track}/{risk}"
releases_affected.add(release)
else:
raise Exception(
f"File {file_path} was changed in 'releases' directory but it's not part of a known release/channel.")

releases_affected_json = json.dumps(list(releases_affected))
print(
f"The following releases have been affected by this PR: {releases_affected_json}")
with open(os.environ['GITHUB_OUTPUT'], 'a') as fh:
print(f'releases_affected_json={releases_affected_json}', file=fh)


get_releases_affected()
Empty file added tests-bundle/1.8/__init__.py
Empty file.
199 changes: 199 additions & 0 deletions tests-bundle/1.8/advanced_notebook.py.tmpl
Original file line number Diff line number Diff line change
@@ -0,0 +1,199 @@
#!/usr/bin/env python
# coding: utf-8

# This file was generated from advanced.ipynb. Do not edit code in this file.
#############################################################################
#############################################################################
#############################################################################
#############################################################################

# ##### Copyright 2019 The TensorFlow Authors.

# In[1]:


#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# # TensorFlow 2 quickstart for experts

# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/quickstart/advanced"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/quickstart/advanced.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/quickstart/advanced.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/quickstart/advanced.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>

# This is a [Google Colaboratory](https://colab.research.google.com/notebooks/welcome.ipynb) notebook file. Python programs are run directly in the browser—a great way to learn and use TensorFlow. To follow this tutorial, run the notebook in Google Colab by clicking the button at the top of this page.
#
# 1. In Colab, connect to a Python runtime: At the top-right of the menu bar, select *CONNECT*.
# 2. Run all the notebook code cells: Select *Runtime* > *Run all*.

# Download and install TensorFlow 2. Import TensorFlow into your program:
#
# Note: Upgrade `pip` to install the TensorFlow 2 package. See the [install guide](https://www.tensorflow.org/install) for details.

# Import TensorFlow into your program:

# In[2]:


import tensorflow as tf
print("TensorFlow version:", tf.__version__)

from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model


# Load and prepare the [MNIST dataset](http://yann.lecun.com/exdb/mnist/).

# In[3]:


mnist = tf.keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Add a channels dimension
x_train = x_train[..., tf.newaxis].astype("float32")
x_test = x_test[..., tf.newaxis].astype("float32")


# Use `tf.data` to batch and shuffle the dataset:

# In[4]:


train_ds = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).shuffle(10000).batch(32)

test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)


# Build the `tf.keras` model using the Keras [model subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models):

# In[5]:


class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.conv1 = Conv2D(32, 3, activation='relu')
self.flatten = Flatten()
self.d1 = Dense(128, activation='relu')
self.d2 = Dense(10)

def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)

# Create an instance of the model
model = MyModel()


# Choose an optimizer and loss function for training:

# In[6]:


loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

optimizer = tf.keras.optimizers.Adam()


# Select metrics to measure the loss and the accuracy of the model. These metrics accumulate the values over epochs and then print the overall result.

# In[7]:


train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')


# Use `tf.GradientTape` to train the model:

# In[8]:


@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
# training=True is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions = model(images, training=True)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))

train_loss(loss)
train_accuracy(labels, predictions)


# Test the model:

# In[9]:


@tf.function
def test_step(images, labels):
# training=False is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions = model(images, training=False)
t_loss = loss_object(labels, predictions)

test_loss(t_loss)
test_accuracy(labels, predictions)


# In[10]:


EPOCHS = 5

for epoch in range(EPOCHS):
# Reset the metrics at the start of the next epoch
train_loss.reset_states()
train_accuracy.reset_states()
test_loss.reset_states()
test_accuracy.reset_states()

for images, labels in train_ds:
train_step(images, labels)

for test_images, test_labels in test_ds:
test_step(test_images, test_labels)

print(
f'Epoch {epoch + 1}, '
f'Loss: {train_loss.result()}, '
f'Accuracy: {train_accuracy.result() * 100}, '
f'Test Loss: {test_loss.result()}, '
f'Test Accuracy: {test_accuracy.result() * 100}'
)


# The image classifier is now trained to ~98% accuracy on this dataset. To learn more, read the [TensorFlow tutorials](https://www.tensorflow.org/tutorials).
76 changes: 76 additions & 0 deletions tests-bundle/1.8/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
import os
import time
from datetime import datetime
from pathlib import Path

import pytest
from selenium import webdriver

from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.service import Service
from webdriver_manager.firefox import GeckoDriverManager

DEBUG = os.environ.get("DEBUG_KF", False)


@pytest.fixture(scope='session')
def driver(request):
"""Set up webdriver fixture."""
options = Options()
if not DEBUG:
print("Running in headless mode")
options.add_argument('--headless')
options.add_argument('--disable-gpu')
else:
options.log.level = "trace"

options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.binary_location = "/snap/bin/firefox"

# must create path,
# see https://github.com/mozilla/geckodriver/releases/tag/v0.31.0
tmp_user = Path("~/tmp").expanduser()
os.environ["TMPDIR"] = str(tmp_user)
tmp_user.mkdir(parents=True, exist_ok=True)

service = Service(GeckoDriverManager().install())
driver = webdriver.Firefox(options=options, service=service)
driver.set_window_size(1920, 1080)
driver.maximize_window()
driver.implicitly_wait(10)

yield driver
driver.quit()


@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Set up a hook to be able to check if a test has failed."""
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()

# set a report attribute for each phase of a call, which can
# be "setup", "call", "teardown"

setattr(item, "rep_" + rep.when, rep)


@pytest.fixture(scope="function")
def failed_check(request):
"""Check if a test has failed and take a screenshot if it has."""
yield
if request.node.rep_setup.passed:
if request.node.rep_call.failed:
driver = request.node.funcargs['driver']
take_screenshot(driver, request.node.name)
print("executing test failed", request.node.nodeid)


def take_screenshot(driver, node_name):
time.sleep(1)
Path("sel-screenshots").mkdir(parents=True, exist_ok=True)
file_name = f'sel-screenshots/{node_name}_{datetime.today().strftime("%m-%d_%H-%M")}.png'
print(f"Taking screenshot: {file_name}")
driver.save_screenshot(file_name)
14 changes: 14 additions & 0 deletions tests-bundle/1.8/helpers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from lightkube.resources.core_v1 import Service


def get_ingress_url(lightkube_client, model_name):
gateway_svc = lightkube_client.get(
Service, "istio-ingressgateway-workload", namespace=model_name
)

public_url = f"http://{gateway_svc.status.loadBalancer.ingress[0].ip}.nip.io"
return public_url


def from_minutes(minutes):
return minutes * 60
Loading

0 comments on commit 0bf8585

Please sign in to comment.