IRQ save/restore #4714

Closed
Changes from all commits
49 changes: 49 additions & 0 deletions .github/actions/veristat_baseline_compare/action.yml
@@ -0,0 +1,49 @@
name: 'run-veristat'
description: 'Run veristat benchmark'
inputs:
  veristat_output:
    description: 'Veristat output filepath'
    required: true
  baseline_name:
    description: 'Veristat baseline cache name'
    required: true
runs:
  using: "composite"
  steps:
    - uses: actions/upload-artifact@v4
      with:
        name: ${{ inputs.baseline_name }}
        if-no-files-found: error
        path: ${{ github.workspace }}/${{ inputs.veristat_output }}

    # For pull request:
    # - get baseline log from cache
    # - compare it to current run
    - if: ${{ github.event_name == 'pull_request' }}
      uses: actions/cache/restore@v4
      with:
        key: ${{ inputs.baseline_name }}
        restore-keys: |
          ${{ inputs.baseline_name }}-
        path: '${{ github.workspace }}/${{ inputs.baseline_name }}'

    - if: ${{ github.event_name == 'pull_request' }}
      name: Show veristat comparison
      shell: bash
      run: ./.github/scripts/compare-veristat-results.sh
      env:
        BASELINE_PATH: ${{ github.workspace }}/${{ inputs.baseline_name }}
        VERISTAT_OUTPUT: ${{ inputs.veristat_output }}

    # For push: just put baseline log to cache
    - if: ${{ github.event_name == 'push' }}
      shell: bash
      run: |
        mv "${{ github.workspace }}/${{ inputs.veristat_output }}" \
           "${{ github.workspace }}/${{ inputs.baseline_name }}"
    - if: ${{ github.event_name == 'push' }}
      uses: actions/cache/save@v4
      with:
        key: ${{ inputs.baseline_name }}-${{ github.run_id }}
        path: '${{ github.workspace }}/${{ inputs.baseline_name }}'
18 changes: 18 additions & 0 deletions .github/scripts/compare-veristat-results.sh
@@ -0,0 +1,18 @@
#!/bin/bash

if [[ ! -f "${BASELINE_PATH}" ]]; then
    echo "# No ${BASELINE_PATH} available" >> "${GITHUB_STEP_SUMMARY}"

    echo "No ${BASELINE_PATH} available"
    echo "Printing veristat results"
    cat "${VERISTAT_OUTPUT}"

    exit
fi

selftests/bpf/veristat \
    --output-format csv \
    --emit file,prog,verdict,states \
    --compare "${BASELINE_PATH}" "${VERISTAT_OUTPUT}" > compare.csv

python3 ./.github/scripts/veristat_compare.py compare.csv
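
The comparison CSV carries, for each program, the baseline and current verdict plus the baseline and current state counts. A hedged sketch of the column layout, inferred from the 8-field rows used in the tests further below; the authoritative names come from VeristatFields.headers() in veristat_compare.py, which is not part of this diff:

# Assumed compare.csv layout (inference, not taken from veristat_compare.py).
ASSUMED_COLUMNS = [
    "file_name",
    "prog_name",
    "verdict_base",
    "verdict_comp",
    "verdict_diff",
    "total_states_base",
    "total_states_comp",
    "total_states_diff",
]
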
197 changes: 197 additions & 0 deletions .github/scripts/matrix.py
@@ -0,0 +1,197 @@
#!/usr/bin/env python3

import os
import dataclasses
import json

from enum import Enum
from typing import Any, Dict, List, Final, Set, Union

MANAGED_OWNER: Final[str] = "kernel-patches"
MANAGED_REPOS: Final[Set[str]] = {
f"{MANAGED_OWNER}/bpf",
f"{MANAGED_OWNER}/vmtest",
}

DEFAULT_RUNNER: Final[str] = "ubuntu-24.04"
DEFAULT_LLVM_VERSION: Final[int] = 17
DEFAULT_SELF_HOSTED_RUNNER_TAGS: Final[List[str]] = ["self-hosted", "docker-noble-main"]


class Arch(str, Enum):
"""
CPU architecture supported by CI.
"""

AARCH64 = "aarch64"
S390X = "s390x"
X86_64 = "x86_64"


class Compiler(str, Enum):
    GCC = "gcc"
    LLVM = "llvm"


@dataclasses.dataclass
class Toolchain:
    compiler: Compiler
    # This is relevant ONLY for LLVM and should not be required for GCC
    version: int

    @property
    def short_name(self) -> str:
        return str(self.compiler.value)

    @property
    def full_name(self) -> str:
        if self.compiler == Compiler.GCC:
            return self.short_name

        return f"{self.short_name}-{self.version}"

    def to_dict(self) -> Dict[str, Union[str, int]]:
        return {
            "name": self.short_name,
            "fullname": self.full_name,
            "version": self.version,
        }
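
A minimal sketch of how the two name properties behave, assuming the classes above are imported as-is (hypothetical usage, not part of the diff):

# Illustrative only: short_name drops the version, full_name keeps it for LLVM.
gcc = Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION)
llvm = Toolchain(compiler=Compiler.LLVM, version=DEFAULT_LLVM_VERSION)
assert gcc.full_name == "gcc"        # GCC never carries a version suffix
assert llvm.full_name == "llvm-17"   # LLVM appends the version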


@dataclasses.dataclass
class BuildConfig:
    arch: Arch
    toolchain: Toolchain
    kernel: str = "LATEST"
    run_veristat: bool = False
    parallel_tests: bool = False
    build_release: bool = False

    @property
    def runs_on(self) -> List[str]:
        if is_managed_repo():
            return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [self.arch.value]
        return [DEFAULT_RUNNER]

    @property
    def build_runs_on(self) -> List[str]:
        if is_managed_repo():
            # Build s390x on x86_64
            return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [
                self.arch.value == "s390x" and Arch.X86_64.value or self.arch.value,
            ]
        return [DEFAULT_RUNNER]

    @property
    def tests(self) -> Dict[str, Any]:
        tests_list = [
            "test_progs",
            "test_progs_parallel",
            "test_progs_no_alu32",
            "test_progs_no_alu32_parallel",
            "test_verifier",
        ]

        if self.arch.value != "s390x":
            tests_list.append("test_maps")

        if self.toolchain.version >= 18:
            tests_list.append("test_progs_cpuv4")

        if self.arch in [Arch.X86_64, Arch.AARCH64]:
            tests_list.append("sched_ext")

        if not self.parallel_tests:
            tests_list = [test for test in tests_list if not test.endswith("parallel")]

        return {"include": [generate_test_config(test) for test in tests_list]}

    def to_dict(self) -> Dict[str, Any]:
        return {
            "arch": self.arch.value,
            "toolchain": self.toolchain.to_dict(),
            "kernel": self.kernel,
            "run_veristat": self.run_veristat,
            "parallel_tests": self.parallel_tests,
            "build_release": self.build_release,
            "runs_on": self.runs_on,
            "tests": self.tests,
            "build_runs_on": self.build_runs_on,
        }
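
The and/or expression in build_runs_on is the old Python spelling of a conditional; because both operands are non-empty strings it behaves like Arch.X86_64.value if self.arch.value == "s390x" else self.arch.value, i.e. s390x kernels are built on an x86_64 runner. A hedged illustration of that expression (hypothetical, not part of the diff):

# Illustrative only: evaluates the bare expression so we do not need the
# GITHUB_* environment variables that is_managed_repo() reads.
cfg = BuildConfig(
    arch=Arch.S390X,
    toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
)
label = cfg.arch.value == "s390x" and Arch.X86_64.value or cfg.arch.value
assert label == "x86_64"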


def is_managed_repo() -> bool:
    return (
        os.environ["GITHUB_REPOSITORY_OWNER"] == MANAGED_OWNER
        and os.environ["GITHUB_REPOSITORY"] in MANAGED_REPOS
    )
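
Both environment variables this check reads are populated automatically by GitHub Actions; setting them by hand is only for demonstration (hypothetical snippet, not part of the diff):

# Demonstration only: GitHub Actions provides these variables in real runs.
os.environ["GITHUB_REPOSITORY_OWNER"] = "kernel-patches"
os.environ["GITHUB_REPOSITORY"] = "kernel-patches/bpf"
assert is_managed_repo()

os.environ["GITHUB_REPOSITORY_OWNER"] = "someone-else"
os.environ["GITHUB_REPOSITORY"] = "someone-else/bpf"
assert not is_managed_repo()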


def set_output(name, value):
"""Write an output variable to the GitHub output file."""
with open(os.getenv("GITHUB_OUTPUT"), "a", encoding="utf-8") as file:
file.write(f"{name}={value}\n")


def generate_test_config(test: str) -> Dict[str, Union[str, int]]:
"""Create the configuration for the provided test."""
is_parallel = test.endswith("_parallel")
config = {
"test": test,
"continue_on_error": is_parallel,
# While in experimental mode, parallel jobs may get stuck
# anywhere, including in user space where the kernel won't detect
# a problem and panic. We add a second layer of (smaller) timeouts
# here such that if we get stuck in a parallel run, we hit this
# timeout and fail without affecting the overall job success (as
# would be the case if we hit the job-wide timeout). For
# non-experimental jobs, 360 is the default which will be
# superseded by the overall workflow timeout (but we need to
# specify something).
"timeout_minutes": 30 if is_parallel else 360,
}
return config
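
For reference, the two shapes this helper produces (hypothetical calls, not part of the diff):

# Illustrative only: parallel tests get a shorter per-test timeout and are
# allowed to fail without failing the job.
assert generate_test_config("test_progs") == {
    "test": "test_progs",
    "continue_on_error": False,
    "timeout_minutes": 360,
}
assert generate_test_config("test_progs_parallel") == {
    "test": "test_progs_parallel",
    "continue_on_error": True,
    "timeout_minutes": 30,
}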


if __name__ == "__main__":
    matrix = [
        BuildConfig(
            arch=Arch.X86_64,
            toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
            run_veristat=True,
            parallel_tests=True,
        ),
        BuildConfig(
            arch=Arch.X86_64,
            toolchain=Toolchain(compiler=Compiler.LLVM, version=DEFAULT_LLVM_VERSION),
            build_release=True,
        ),
        BuildConfig(
            arch=Arch.X86_64,
            toolchain=Toolchain(compiler=Compiler.LLVM, version=18),
            build_release=True,
        ),
        BuildConfig(
            arch=Arch.AARCH64,
            toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
        ),
        # BuildConfig(
        #     arch=Arch.AARCH64,
        #     toolchain=Toolchain(
        #         compiler=Compiler.LLVM,
        #         version=DEFAULT_LLVM_VERSION
        #     ),
        # ),
        BuildConfig(
            arch=Arch.S390X,
            toolchain=Toolchain(compiler=Compiler.GCC, version=DEFAULT_LLVM_VERSION),
        ),
    ]

    # Outside of those repositories we only run on x86_64
    if not is_managed_repo():
        matrix = [config for config in matrix if config.arch == Arch.X86_64]

    json_matrix = json.dumps({"include": [config.to_dict() for config in matrix]})
    print(json_matrix)
    set_output("build_matrix", json_matrix)
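
A downstream workflow job would typically consume the build_matrix output via fromJSON in its strategy.matrix. A hedged sketch of roughly what one entry looks like for the default x86_64/GCC configuration in an unmanaged repository (values derived from the defaults above, not captured output):

# Illustrative only: approximate shape of a single entry in the emitted
# {"include": [...]} matrix, as built by BuildConfig.to_dict().
example_entry = {
    "arch": "x86_64",
    "toolchain": {"name": "gcc", "fullname": "gcc", "version": 17},
    "kernel": "LATEST",
    "run_veristat": True,
    "parallel_tests": True,
    "build_release": False,
    "runs_on": ["ubuntu-24.04"],
    "build_runs_on": ["ubuntu-24.04"],
    "tests": {"include": [
        {"test": "test_progs", "continue_on_error": False, "timeout_minutes": 360},
        ...,  # remaining selftest entries elided
    ]},
}
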
75 changes: 75 additions & 0 deletions .github/scripts/tests/test_veristat_compare.py
@@ -0,0 +1,75 @@
#!/usr/bin/env python3

import unittest
from typing import Iterable, List

from ..veristat_compare import parse_table, VeristatFields


def gen_csv_table(records: Iterable[str]) -> List[str]:
    return [
        ",".join(VeristatFields.headers()),
        *records,
    ]


class TestVeristatCompare(unittest.TestCase):
    def test_parse_table_ignore_new_prog(self):
        table = gen_csv_table(
            [
                "prog_file.bpf.o,prog_name,N/A,success,N/A,N/A,1,N/A",
            ]
        )
        veristat_info = parse_table(table)
        self.assertEqual(veristat_info.table, [])
        self.assertFalse(veristat_info.changes)
        self.assertFalse(veristat_info.new_failures)

    def test_parse_table_ignore_removed_prog(self):
        table = gen_csv_table(
            [
                "prog_file.bpf.o,prog_name,success,N/A,N/A,1,N/A,N/A",
            ]
        )
        veristat_info = parse_table(table)
        self.assertEqual(veristat_info.table, [])
        self.assertFalse(veristat_info.changes)
        self.assertFalse(veristat_info.new_failures)

    def test_parse_table_new_failure(self):
        table = gen_csv_table(
            [
                "prog_file.bpf.o,prog_name,success,failure,MISMATCH,1,1,+0 (+0.00%)",
            ]
        )
        veristat_info = parse_table(table)
        self.assertEqual(
            veristat_info.table,
            [["prog_file.bpf.o", "prog_name", "success -> failure (!!)", "+0.00 %"]],
        )
        self.assertTrue(veristat_info.changes)
        self.assertTrue(veristat_info.new_failures)

    def test_parse_table_new_changes(self):
        table = gen_csv_table(
            [
                "prog_file.bpf.o,prog_name,failure,success,MISMATCH,0,0,+0 (+0.00%)",
                "prog_file.bpf.o,prog_name_increase,failure,failure,MATCH,1,2,+1 (+100.00%)",
                "prog_file.bpf.o,prog_name_decrease,success,success,MATCH,1,1,-1 (-100.00%)",
            ]
        )
        veristat_info = parse_table(table)
        self.assertEqual(
            veristat_info.table,
            [
                ["prog_file.bpf.o", "prog_name", "failure -> success", "+0.00 %"],
                ["prog_file.bpf.o", "prog_name_increase", "failure", "+100.00 %"],
                ["prog_file.bpf.o", "prog_name_decrease", "success", "-100.00 %"],
            ],
        )
        self.assertTrue(veristat_info.changes)
        self.assertFalse(veristat_info.new_failures)


if __name__ == "__main__":
    unittest.main()