From b6a6e665bc0da8862bbb899d615f9014dbd6f2e6 Mon Sep 17 00:00:00 2001
From: Le Li
Date: Thu, 7 Nov 2024 06:04:39 +0000
Subject: [PATCH 01/10] Add Geekbench benchmark framework

---
 .../linux_benchmarks/geekbench_benchmark.py   | 51 ++++++++++++++++++
 .../windows_benchmarks/geekbench_benchmark.py | 52 +++++++++++++++++++
 2 files changed, 103 insertions(+)
 create mode 100644 perfkitbenchmarker/linux_benchmarks/geekbench_benchmark.py
 create mode 100644 perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py

diff --git a/perfkitbenchmarker/linux_benchmarks/geekbench_benchmark.py b/perfkitbenchmarker/linux_benchmarks/geekbench_benchmark.py
new file mode 100644
index 000000000..12b7ecb5c
--- /dev/null
+++ b/perfkitbenchmarker/linux_benchmarks/geekbench_benchmark.py
@@ -0,0 +1,51 @@
+# Define the name of the benchmark as a string constant.
+BENCHMARK_NAME = 'geekbench'
+
+# Define the configuration for the benchmark.
+# This includes VM groups and any flags specific to this benchmark.
+BENCHMARK_CONFIG = """
+geekbench:
+  description: >
+    Runs Geekbench 6 to evaluate system performance across CPU and GPU on
+    Linux or Windows platforms.
+  vm_groups:
+    default:
+      vm_spec: *default_single_core  # Using a single-core VM setup as an example.
+"""
+
+from perfkitbenchmarker import configs
+from perfkitbenchmarker import sample
+
+def GetConfig(user_config):
+  """Returns the configuration for the benchmark."""
+  return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
+
+def Prepare(benchmark_spec):
+  """Sets up the environment on the VM for the benchmark.
+
+  Args:
+    benchmark_spec: The benchmark specification. Contains all data required to
+      run the benchmark, including the VMs.
+  """
+  pass
+
+def Run(benchmark_spec):
+  """Runs Geekbench on the VM and returns performance samples.
+
+  Args:
+    benchmark_spec: The benchmark specification. Contains all data required to
+      run the benchmark, including the VMs.
+
+  Returns:
+    A list of sample.Sample objects containing the results of the benchmark.
+  """
+  return []
+
+def Cleanup(benchmark_spec):
+  """Cleans up the environment on the VM after the benchmark.
+
+  Args:
+    benchmark_spec: The benchmark specification. Contains all data required to
+      run the benchmark, including the VMs.
+  """
+  pass
diff --git a/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py b/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py
new file mode 100644
index 000000000..449e2a37c
--- /dev/null
+++ b/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py
@@ -0,0 +1,52 @@
+# Define the name of the benchmark as a string constant.
+BENCHMARK_NAME = 'geekbench'
+
+# Define the configuration for the benchmark.
+# This includes VM groups and any flags specific to this benchmark.
+BENCHMARK_CONFIG = """
+geekbench:
+  description: >
+    Runs Geekbench 6 to evaluate system performance across CPU and GPU on
+    Linux or Windows platforms.
+  vm_groups:
+    default:
+      vm_spec: *default_single_core  # Using a single-core VM setup as an example.
+"""
+
+# Import necessary modules from PKB
+from perfkitbenchmarker import configs
+from perfkitbenchmarker import sample
+
+def GetConfig(user_config):
+  """Returns the configuration for the benchmark."""
+  return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
+
+def Prepare(benchmark_spec):
+  """Sets up the environment on the VM for the benchmark.
+
+  Args:
+    benchmark_spec: The benchmark specification. Contains all data required to
+      run the benchmark, including the VMs.
+ """ + pass + +def Run(benchmark_spec): + """Runs Geekbench on the VM and returns performance samples. + + Args: + benchmark_spec: The benchmark specification. Contains all data required to + run the benchmark, including the VMs. + + Returns: + A list of sample.Sample objects containing the results of the benchmark. + """ + return [] + +def Cleanup(benchmark_spec): + """Cleans up the environment on the VM after the benchmark. + + Args: + benchmark_spec: The benchmark specification. Contains all data required to + run the benchmark, including the VMs. + """ + pass From 3db12b10db16c0061e540f6f4de9c5fdf2cab8d2 Mon Sep 17 00:00:00 2001 From: Le Li Date: Thu, 7 Nov 2024 20:48:36 +0000 Subject: [PATCH 02/10] add Install function with mock tests for Windows and Ubuntu --- .../linux_packages/geekbench.py | 33 +++++++++ .../windows_packages/geekbench.py | 22 ++++++ tests/linux_packages/geekbench_test.py | 69 +++++++++++++++++++ tests/windows_packages/geekbench_test.py | 69 +++++++++++++++++++ 4 files changed, 193 insertions(+) create mode 100644 perfkitbenchmarker/linux_packages/geekbench.py create mode 100644 perfkitbenchmarker/windows_packages/geekbench.py create mode 100644 tests/linux_packages/geekbench_test.py create mode 100644 tests/windows_packages/geekbench_test.py diff --git a/perfkitbenchmarker/linux_packages/geekbench.py b/perfkitbenchmarker/linux_packages/geekbench.py new file mode 100644 index 000000000..1dfbae6bb --- /dev/null +++ b/perfkitbenchmarker/linux_packages/geekbench.py @@ -0,0 +1,33 @@ +import posixpath +from perfkitbenchmarker import linux_packages + +# Define the Geekbench version and the URL to download the tarball +GEEKBENCH_VERSION = "6.3.0" +GEEKBENCH_URL = f'https://cdn.geekbench.com/Geekbench-{GEEKBENCH_VERSION}-Linux.tar.gz' + +# Set the directory where Geekbench will be installed +GEEKBENCH_DIR = posixpath.join(linux_packages.INSTALL_DIR, 'geekbench') + +# Define the path to the Geekbench executable +GEEKBENCH_EXEC = posixpath.join(GEEKBENCH_DIR, 'geekbench6') + +def _Install(vm): + """Installs the Geekbench package on the VM.""" + + # Create the installation directory for Geekbench + vm.RemoteCommand(f'mkdir -p {GEEKBENCH_DIR}') + + # Download and extract the Geekbench tarball directly to the installation directory + # `--strip-components=1` removes the top-level directory from the tarball + vm.RemoteCommand(f'wget -qO- {GEEKBENCH_URL} | tar xz -C {GEEKBENCH_DIR} --strip-components=1') + + # Make sure the Geekbench executable has the correct permissions to be run + vm.RemoteCommand(f'chmod +x {GEEKBENCH_EXEC}') + +def YumInstall(vm): + """Installs Geekbench on the VM for systems using the yum package manager.""" + _Install(vm) + +def AptInstall(vm): + """Installs Geekbench on the VM for systems using the apt package manager.""" + _Install(vm) diff --git a/perfkitbenchmarker/windows_packages/geekbench.py b/perfkitbenchmarker/windows_packages/geekbench.py new file mode 100644 index 000000000..af9d7c6de --- /dev/null +++ b/perfkitbenchmarker/windows_packages/geekbench.py @@ -0,0 +1,22 @@ +import ntpath + +# Define the Geekbench version and the URL to download the Windows installer +GEEKBENCH_VERSION = "6.3.0" +GEEKBENCH_EXE = f"Geekbench-{GEEKBENCH_VERSION}-WindowsSetup.exe" +GEEKBENCH_EXE_URL = f"https://cdn.geekbench.com/{GEEKBENCH_EXE}" + +def Install(vm): + """Installs the Geekbench package on the VM.""" + + # Create a directory for downloading the installer within the VM's temporary directory + geekbench_download_path = ntpath.join(vm.temp_dir, "geekbench", 
"") + vm.RemoteCommand(f"New-Item -Path {geekbench_download_path} -ItemType Directory") + + # Define the full path to where the installer will be downloaded + geekbench_exe = ntpath.join(geekbench_download_path, GEEKBENCH_EXE) + + # Download the Geekbench installer from the specified URL to the download path + vm.DownloadFile(GEEKBENCH_EXE_URL, geekbench_exe) + + # Run the Geekbench installer with silent installation options to avoid manual intervention + vm.RemoteCommand(f"{geekbench_exe} /SILENT /NORESTART Dir={geekbench_download_path}") diff --git a/tests/linux_packages/geekbench_test.py b/tests/linux_packages/geekbench_test.py new file mode 100644 index 000000000..93d561126 --- /dev/null +++ b/tests/linux_packages/geekbench_test.py @@ -0,0 +1,69 @@ +import unittest +from unittest import mock +from unittest.mock import patch +import posixpath + +# Mock constants for the expected paths used in `_Install` +GEEKBENCH_VERSION = "6.3.0" +GEEKBENCH_URL = f"https://cdn.geekbench.com/Geekbench-{GEEKBENCH_VERSION}-Linux.tar.gz" +GEEKBENCH_DIR = "/opt/pkb/geekbench" # Update to the actual install directory +GEEKBENCH_EXEC = posixpath.join(GEEKBENCH_DIR, 'geekbench6') + +# Import the Install function for Linux +from perfkitbenchmarker.linux_packages import geekbench + + +class TestGeekbenchLinuxInstall(unittest.TestCase): + """Unit test case for the Geekbench Install function in the Linux package. + + This test case verifies that the `_Install` function in `geekbench.py` + performs the expected commands for: + - Creating the installation directory on the virtual machine. + - Downloading and extracting the Geekbench tarball. + - Making the Geekbench executable accessible. + + The test case uses mocks to simulate file paths and command calls, + allowing verification of the function's behavior without executing actual + installation steps. + """ + + @patch('perfkitbenchmarker.linux_packages.geekbench.posixpath') + def test_install_geekbench_linux(self, mock_posixpath): + """Tests the `_Install` function for Linux installation. + + This method: + - Mocks a virtual machine (VM) object to track `RemoteCommand` calls. + - Sets up expectations for directory creation, file download, extraction, and permissions. + - Verifies that the `_Install` function sends the correct commands to the VM. + + Args: + mock_posixpath: A mock for the `posixpath` module used within the `_Install` function, + enabling control over path joining behavior. 
+ """ + + # Mock VM object with RemoteCommand method + mock_vm = mock.Mock() + + # Define expected paths + expected_dir = GEEKBENCH_DIR + expected_exec_path = GEEKBENCH_EXEC + + # Mock posixpath joins to return expected paths + mock_posixpath.join.side_effect = lambda *args: posixpath.join(*args) + + # Call the Install function + geekbench._Install(mock_vm) + + # Print all RemoteCommand calls to verify + print("RemoteCommand calls made:", mock_vm.RemoteCommand.call_args_list) + + # Assertions to verify commands + mock_vm.RemoteCommand.assert_any_call(f'mkdir -p {expected_dir}') + mock_vm.RemoteCommand.assert_any_call( + f'wget -qO- {GEEKBENCH_URL} | tar xz -C {expected_dir} --strip-components=1' + ) + mock_vm.RemoteCommand.assert_any_call(f'chmod +x {expected_exec_path}') + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/windows_packages/geekbench_test.py b/tests/windows_packages/geekbench_test.py new file mode 100644 index 000000000..6b2876f8c --- /dev/null +++ b/tests/windows_packages/geekbench_test.py @@ -0,0 +1,69 @@ +import unittest +from unittest import mock +from unittest.mock import patch +import ntpath + +# Mock constants for the expected paths used in `Install` +GEEKBENCH_VERSION = "6.3.0" +GEEKBENCH_EXE = f"Geekbench-{GEEKBENCH_VERSION}-WindowsSetup.exe" +GEEKBENCH_EXE_URL = f"https://cdn.geekbench.com/{GEEKBENCH_EXE}" +GEEKBENCH_DIR = "geekbench" + +# Import the Install function for Windows +from perfkitbenchmarker.windows_packages import geekbench + + +class TestGeekbenchWindowsInstall(unittest.TestCase): + """Unit test case for the Geekbench Install function in the Windows package. + + This test case verifies that the `Install` function in `geekbench.py` + performs the expected commands for: + - Creating a download directory on the virtual machine. + - Downloading the Geekbench installer to the specified directory. + - Running the installer with silent install options. + + Mocks are used to replace actual file paths and commands with controlled + expectations, allowing the function's behavior to be verified without + performing real installations. + """ + + @patch('perfkitbenchmarker.windows_packages.geekbench.ntpath') + def test_install_geekbench_windows(self, mock_ntpath): + """Tests the `Install` function for Windows installation. + + This method: + - Mocks a virtual machine (VM) object. + - Sets up expectations for directory creation, file download, and installer execution. + - Verifies that the `Install` function sends the correct commands to the VM. + + Args: + mock_ntpath: A mock for the `ntpath` module used within the `Install` function, + allowing control over path joining behavior. 
+ """ + + # Mock VM object with RemoteCommand and DownloadFile methods + mock_vm = mock.Mock() + mock_vm.temp_dir = "C:\\temp" + + # Expected paths for download directory and installer + expected_download_path = ntpath.join(mock_vm.temp_dir, GEEKBENCH_DIR, "") + expected_exe_path = ntpath.join(expected_download_path, GEEKBENCH_EXE) + + # Mock ntpath.join to simulate path joining + mock_ntpath.join.side_effect = lambda *args: ntpath.join(*args) + + # Call the Install function + geekbench.Install(mock_vm) + + # Assertions to verify commands + mock_vm.RemoteCommand.assert_any_call(f"New-Item -Path {expected_download_path} -ItemType Directory") + mock_vm.DownloadFile.assert_called_once_with(GEEKBENCH_EXE_URL, expected_exe_path) + mock_vm.RemoteCommand.assert_any_call(f"{expected_exe_path} /SILENT /NORESTART Dir={expected_download_path}") + + # Print all RemoteCommand and DownloadFile calls to verify + print("RemoteCommand calls made:", mock_vm.RemoteCommand.call_args_list) + print("DownloadFile calls made:", mock_vm.DownloadFile.call_args_list) + + +if __name__ == '__main__': + unittest.main() From 9ab0314b9b8dffdcf456037c69387db1106d910a Mon Sep 17 00:00:00 2001 From: Le Li Date: Fri, 8 Nov 2024 02:04:17 +0000 Subject: [PATCH 03/10] Implement ParseResults function and unit tests for Geekbench output parsing --- .../data/geekbench/geekbench_linux.txt | 224 +++++++++++++++++ .../data/geekbench/geekbench_windows.txt | 238 ++++++++++++++++++ .../linux_benchmarks/geekbench_benchmark.py | 141 +++++++++-- .../windows_benchmarks/geekbench_benchmark.py | 150 +++++++++-- tests/geekbench_benchmark_windows_test.py | 77 ++++++ .../geekbench_benchmark_linux_test.py | 85 +++++++ 6 files changed, 879 insertions(+), 36 deletions(-) create mode 100644 perfkitbenchmarker/data/geekbench/geekbench_linux.txt create mode 100644 perfkitbenchmarker/data/geekbench/geekbench_windows.txt create mode 100644 tests/geekbench_benchmark_windows_test.py create mode 100644 tests/linux_benchmarks/geekbench_benchmark_linux_test.py diff --git a/perfkitbenchmarker/data/geekbench/geekbench_linux.txt b/perfkitbenchmarker/data/geekbench/geekbench_linux.txt new file mode 100644 index 000000000..453729090 --- /dev/null +++ b/perfkitbenchmarker/data/geekbench/geekbench_linux.txt @@ -0,0 +1,224 @@ +CPU result: https://browser.geekbench.com/v6/cpu/8708408 +GPU result: https://browser.geekbench.com/v6/compute/3079319 +QEMU Standard PC (Q35 + ICH9, 2009) +1803 +Single-Core Score +5678 +Multi-Core Score +Geekbench 6.3.0 for Linux AVX2 +Result Information +Upload Date November 08 2024 12:27 AM +Views 4 +System Information +System Information +Operating System Debian GNU/Linux 12 (bookworm) +Model QEMU Standard PC (Q35 + ICH9, 2009) +Motherboard N/A +CPU Information +Name AMD EPYC Processor +Topology 1 Processor, 4 Cores +Identifier AuthenticAMD Family 23 Model 1 Stepping 2 +Base Frequency 2.60 GHz +Cluster 1 0 Cores +L1 Instruction Cache 64.0 KB x 4 +L1 Data Cache 32.0 KB x 4 +L2 Cache 512 KB x 4 +L3 Cache 8.00 MB x 1 +Memory Information +Size 15.58 GB +Single-Core Performance +Single-Core Score 1803 +File Compression +1793 +257.5 MB/sec + +Navigation +1615 +9.73 routes/sec + +HTML5 Browser +1946 +39.8 pages/sec + +PDF Renderer +1761 +40.6 Mpixels/sec + +Photo Library +1635 +22.2 images/sec + +Clang +1951 +9.61 Klines/sec + +Text Processing +1852 +148.3 pages/sec + +Asset Compression +1875 +58.1 MB/sec + +Object Detection +1045 +31.3 images/sec + +Background Blur +2433 +10.1 images/sec + +Horizon Detection +2346 +73.0 Mpixels/sec + +Object 
+1782
+137.0 Mpixels/sec
+
+HDR
+2001
+58.7 Mpixels/sec
+
+Photo Filter
+1894
+18.8 images/sec
+
+Ray Tracer
+1892
+1.83 Mpixels/sec
+
+Structure from Motion
+1913
+60.6 Kpixels/sec
+
+Multi-Core Performance
+Multi-Core Score 5678
+File Compression
+4147
+595.5 MB/sec
+
+Navigation
+5677
+34.2 routes/sec
+
+HTML5 Browser
+6785
+138.9 pages/sec
+
+PDF Renderer
+6159
+142.0 Mpixels/sec
+
+Photo Library
+6108
+82.9 images/sec
+
+Clang
+7476
+36.8 Klines/sec
+
+Text Processing
+2320
+185.8 pages/sec
+
+Asset Compression
+7093
+219.8 MB/sec
+
+Object Detection
+3641
+109.0 images/sec
+
+Background Blur
+6969
+28.8 images/sec
+
+Horizon Detection
+8059
+250.8 Mpixels/sec
+
+Object Remover
+5453
+419.3 Mpixels/sec
+
+HDR
+6463
+189.7 Mpixels/sec
+
+Photo Filter
+5628
+55.8 images/sec
+
+Ray Tracer
+7476
+7.23 Mpixels/sec
+
+Structure from Motion
+7505
+237.6 Kpixels/sec
+QEMU Standard PC (Q35 + ICH9, 2009)
+358153
+OpenCL Score
+Geekbench 6.3.0 for Linux AVX2
+Result Information
+Upload Date November 07 2024 09:35 PM
+Views 5
+System Information
+System Information
+Operating System Ubuntu 24.04 LTS
+Model QEMU Standard PC (Q35 + ICH9, 2009)
+Motherboard N/A
+CPU Information
+Name Intel Xeon Gold 6434
+Topology 1 Processor, 16 Cores
+Identifier GenuineIntel Family 6 Model 143 Stepping 8
+Base Frequency 3.70 GHz
+Cluster 1 0 Cores
+L1 Instruction Cache 32.0 KB x 16
+L1 Data Cache 32.0 KB x 16
+L2 Cache 4.00 MB x 16
+L3 Cache 16.0 MB x 1
+Memory Information
+Size 393.43 GB
+OpenCL Information
+Platform Vendor NVIDIA Corporation
+Platform Name NVIDIA CUDA
+Device Vendor NVIDIA Corporation
+Device Name NVIDIA L40
+Compute Units 142
+Maximum Frequency 2490 MHz
+Device Memory 44.3 GB
+OpenCL Performance
+OpenCL Score 358153
+Background Blur
+130203
+538.9 images/sec
+
+Face Detection
+113618
+370.9 images/sec
+
+Horizon Detection
+417389
+13.0 Gpixels/sec
+
+Edge Detection
+491859
+18.2 Gpixels/sec
+
+Gaussian Blur
+548280
+23.9 Gpixels/sec
+
+Feature Matching
+59186
+2.33 Gpixels/sec
+
+Stereo Matching
+2276330
+2.16 Tpixels/sec
+
+Particle Physics
+1206812
+53112.8 FPS
\ No newline at end of file
diff --git a/perfkitbenchmarker/data/geekbench/geekbench_windows.txt b/perfkitbenchmarker/data/geekbench/geekbench_windows.txt
new file mode 100644
index 000000000..be4aee899
--- /dev/null
+++ b/perfkitbenchmarker/data/geekbench/geekbench_windows.txt
@@ -0,0 +1,238 @@
+result link: https://browser.geekbench.com/v6/cpu/8695734
+Acer Nitro AN515-45
+1795
+Single-Core Score
+6627
+Multi-Core Score
+Geekbench 6.3.0 for Windows AVX2Valid
+Result Information
+Upload Date November 07 2024 07:05 AM
+Views 5
+System Information
+System Information
+Operating System Microsoft Windows 10 Home (64-bit)
+Model Acer Nitro AN515-45
+Motherboard CZ Scala_CAS
+Power Plan Acer
+CPU Information
+Name AMD Ryzen 7 5800H
+Topology 1 Processor, 8 Cores, 16 Threads
+Identifier AuthenticAMD Family 25 Model 80 Stepping 0
+Base Frequency 3.20 GHz
+Cluster 1 8 Cores
+Maximum Frequency 4314 MHz
+Package Socket FP6
+Codename Cezanne
+L1 Instruction Cache 32.0 KB x 8
+L1 Data Cache 32.0 KB x 8
+L2 Cache 512 KB x 8
+L3 Cache 16.0 MB x 1
+Memory Information
+Size 16.00 GB
+Transfer Rate 3184 MT/s
+Type DDR4 SDRAM
+Channels 2
+Single-Core Performance
+Single-Core Score 1795
+File Compression
+1875
+269.3 MB/sec
+
+Navigation
+1640
+9.88 routes/sec
+
+HTML5 Browser
+1694
+34.7 pages/sec
+
+PDF Renderer
+1795
+41.4 Mpixels/sec
+
+Photo Library
+1702
+23.1 images/sec
+
+Clang
+1808
+8.91 Klines/sec
+
+Text Processing
+1716
+137.4 pages/sec
+
+Asset Compression
+1909
+59.1 MB/sec
+
+Object Detection
+999
+29.9 images/sec
+
+Background Blur
+2342
+9.69 images/sec
+
+Horizon Detection
+2573
+80.1 Mpixels/sec
+
+Object Remover
+1744
+134.1 Mpixels/sec
+
+HDR
+2120
+62.2 Mpixels/sec
+
+Photo Filter
+2197
+21.8 images/sec
+
+Ray Tracer
+1744
+1.69 Mpixels/sec
+
+Structure from Motion
+1998
+63.3 Kpixels/sec
+
+Multi-Core Performance
+Multi-Core Score 6627
+File Compression
+3169
+455.1 MB/sec
+
+Navigation
+8253
+49.7 routes/sec
+
+HTML5 Browser
+5091
+104.2 pages/sec
+
+PDF Renderer
+7590
+175.0 Mpixels/sec
+
+Photo Library
+8266
+112.2 images/sec
+
+Clang
+11929
+58.8 Klines/sec
+
+Text Processing
+2315
+185.4 pages/sec
+
+Asset Compression
+13665
+423.4 MB/sec
+
+Object Detection
+2874
+86.0 images/sec
+
+Background Blur
+6543
+27.1 images/sec
+
+Horizon Detection
+8437
+262.5 Mpixels/sec
+
+Object Remover
+9410
+723.5 Mpixels/sec
+
+HDR
+8105
+237.8 Mpixels/sec
+
+Photo Filter
+4776
+47.4 images/sec
+
+Ray Tracer
+15486
+15.0 Mpixels/sec
+
+Structure from Motion
+7647
+242.1 Kpixels/sec
+Acer Nitro AN515-45
+88265
+OpenCL Score
+Geekbench 6.3.0 for Windows AVX2Valid
+Result Information
+Upload Date November 07 2024 10:49 PM
+Views 1
+System Information
+System Information
+Operating System Microsoft Windows 10 Home (64-bit)
+Model Acer Nitro AN515-45
+Motherboard CZ Scala_CAS
+Power Plan Acer
+CPU Information
+Name AMD Ryzen 7 5800H
+Topology 1 Processor, 8 Cores, 16 Threads
+Identifier AuthenticAMD Family 25 Model 80 Stepping 0
+Base Frequency 3.20 GHz
+Cluster 1 8 Cores
+Maximum Frequency 4314 MHz
+Package Socket FP6
+Codename Cezanne
+L1 Instruction Cache 32.0 KB x 8
+L1 Data Cache 32.0 KB x 8
+L2 Cache 512 KB x 8
+L3 Cache 16.0 MB x 1
+Memory Information
+Size 16.00 GB
+Transfer Rate 3184 MT/s
+Type DDR4 SDRAM
+Channels 2
+OpenCL Information
+Platform Vendor NVIDIA Corporation
+Platform Name NVIDIA CUDA
+Device Vendor NVIDIA Corporation
+Device Name NVIDIA GeForce RTX 3060 Laptop GPU
+Compute Units 30
+Maximum Frequency 1425 MHz
+Device Memory 6.00 GB
+OpenCL Performance
+OpenCL Score 88265
+Background Blur
+40721
+168.5 images/sec
+
+Face Detection
+25293
+82.6 images/sec
+
+Horizon Detection
+126034
+3.92 Gpixels/sec
+
+Edge Detection
+147170
+5.46 Gpixels/sec
+
+Gaussian Blur
+101808
+4.44 Gpixels/sec
+
+Feature Matching
+23461
+924.9 Mpixels/sec
+
+Stereo Matching
+326910
+310.8 Gpixels/sec
+
+Particle Physics
+246967
+10869.2 FPS
+ 
\ No newline at end of file
diff --git a/perfkitbenchmarker/linux_benchmarks/geekbench_benchmark.py b/perfkitbenchmarker/linux_benchmarks/geekbench_benchmark.py
index 12b7ecb5c..eba66841f 100644
--- a/perfkitbenchmarker/linux_benchmarks/geekbench_benchmark.py
+++ b/perfkitbenchmarker/linux_benchmarks/geekbench_benchmark.py
@@ -21,31 +21,138 @@
   return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
 
 def Prepare(benchmark_spec):
-  """Sets up the environment on the VM for the benchmark.
-
-  Args:
-    benchmark_spec: The benchmark specification. Contains all data required to
-      run the benchmark, including the VMs.
+  """
+  Sets up the environment on the VM for the benchmark.
   """
   pass
 
 def Run(benchmark_spec):
-  """Runs Geekbench on the VM and returns performance samples.
-
-  Args:
-    benchmark_spec: The benchmark specification. Contains all data required to
-      run the benchmark, including the VMs.
-
-  Returns:
-    A list of sample.Sample objects containing the results of the benchmark.
+ """ + Runs Geekbench on the VM and returns performance samples. """ return [] def Cleanup(benchmark_spec): - """Cleans up the environment on the VM after the benchmark. + """ + Cleans up the environment on the VM after the benchmark. + """ + pass + +def ParseResults(geekbench_output: str): + """ + Parses Geekbench benchmark results to extract metrics for Single-Core, Multi-Core, + and OpenCL performance tests. Each metric entry in the output represents a specific + test result with associated metadata. Args: - benchmark_spec: The benchmark specification. Contains all data required to - run the benchmark, including the VMs. + geekbench_output (str): Raw output from a Geekbench benchmark as a string. + + Returns: + List[Dict]: A list of dictionaries where each dictionary represents a parsed metric + sample. Each dictionary has the following structure: + + - "metric_name" (str): The name of the metric, describing the test and + performance category. Examples include "Single-Core File Compression" or "Multi-Core Score". + + - "metric_value" (float): The numerical result or score of the specific test. This could + be a throughput value, such as MB/sec, or a score in points. + + - "metric_unit" (str): The unit associated with the metric value. For example, units + can be "MB/sec" for throughput or "points" for scores. + + - "metric_metadata" (dict): Additional metadata about the test, including: + - "category" (str): The performance category, such as "Single-Core", "Multi-Core", or "OpenCL". + - "test" (str, optional): The specific test name within the category, such as "File Compression" + or "HTML5 Browser". This key is present for detailed test metrics. + - "score" (int, optional): The individual test score associated with the metric, where applicable. 
+
+  Example Output:
+    [
+      {
+        "metric_name": "Single-Core Score",
+        "metric_value": 1803,
+        "metric_unit": "points",
+        "metric_metadata": {"category": "Single-Core"}
+      },
+      {
+        "metric_name": "Single-Core File Compression",
+        "metric_value": 257.5,
+        "metric_unit": "MB/sec",
+        "metric_metadata": {
+          "category": "Single-Core",
+          "test": "File Compression",
+          "score": 1793
+        }
+      }
+    ]
+  """
+
+  # Initialize a list to store the parsed samples
+  samples = []
+
+  # Track the current category (Single-Core, Multi-Core, or OpenCL)
+  current_category = None
+  current_metric_name = None
+  last_score = None  # Store the last score for each test to add to throughput metadata
+
+  # Split the output into lines for easier processing
+  lines = geekbench_output.splitlines()
+
+  for line in lines:
+    line = line.strip()
+
+    # Detect category headers
+    if "Single-Core Performance" in line:
+      current_category = "Single-Core"
+    elif "Multi-Core Performance" in line:
+      current_category = "Multi-Core"
+    elif "OpenCL Performance" in line:
+      current_category = "OpenCL"
+
+    # Detect overall score lines, ensuring current_category is not None
+    elif "Score" in line and current_category:
+      try:
+        score = int(line.split()[-1].strip())  # Extract score value
+        samples.append({
+            "metric_name": f"{current_category} Score",
+            "metric_value": score,
+            "metric_unit": "points",
+            "metric_metadata": {"category": current_category}
+        })
+      except ValueError:
+        continue
+
+    # Detect specific test names within a category
+    elif line and line.split()[0].isalpha():
+      current_metric_name = line.strip()
+
+    # Detect score line before throughput, storing score for metadata
+    elif current_metric_name and line.isdigit():
+      last_score = int(line.strip())  # Store the score value for metadata
+
+    # Detect throughput values with units (e.g., 269.3 MB/sec)
+    elif current_metric_name and line:
+      parts = line.strip().split()
+      try:
+        value = float(parts[0].strip())  # First part is the numeric value
+        unit = ' '.join(parts[1:]).strip() if len(parts) > 1 else 'points'  # Remaining part is the unit
+
+        # Add the parsed data as a sample, including the last_score in metadata
+        samples.append({
+            "metric_name": f"{current_category} {current_metric_name}",
+            "metric_value": value,
+            "metric_unit": unit,
+            "metric_metadata": {
+                "category": current_category,
+                "test": current_metric_name,
+                "score": last_score
+            }
+        })
+
+        # Reset the metric name and score after processing
+        current_metric_name = None
+        last_score = None
+      except ValueError:
+        continue
+
+  return samples
diff --git a/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py b/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py
index 449e2a37c..d2d612311 100644
--- a/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py
+++ b/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py
@@ -10,7 +10,7 @@
     Linux or Windows platforms.
   vm_groups:
     default:
-      vm_spec: *default_single_core  # Using a single-core VM setup as an example.
+      vm_spec: *default_single_core
 """
 
 # Import necessary modules from PKB
@@ -18,35 +18,147 @@
 from perfkitbenchmarker import sample
 
 def GetConfig(user_config):
-  """Returns the configuration for the benchmark."""
+  """
+  Returns the configuration for the benchmark.
+  """
   return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
 
 def Prepare(benchmark_spec):
-  """Sets up the environment on the VM for the benchmark.
-
-  Args:
-    benchmark_spec: The benchmark specification. Contains all data required to
-      run the benchmark, including the VMs.
+  """
+  Sets up the environment on the VM for the benchmark.
   """
   pass
 
 def Run(benchmark_spec):
-  """Runs Geekbench on the VM and returns performance samples.
-
-  Args:
-    benchmark_spec: The benchmark specification. Contains all data required to
-      run the benchmark, including the VMs.
-
-  Returns:
-    A list of sample.Sample objects containing the results of the benchmark.
+  """
+  Runs Geekbench on the VM and returns performance samples.
   """
   return []
 
 def Cleanup(benchmark_spec):
-  """Cleans up the environment on the VM after the benchmark.
-
-  Args:
-    benchmark_spec: The benchmark specification. Contains all data required to
-      run the benchmark, including the VMs.
+  """
+  Cleans up the environment on the VM after the benchmark.
   """
   pass
+
+def ParseResults(geekbench_output: str):
+  """
+  Parses Geekbench benchmark results to extract metrics for Single-Core, Multi-Core,
+  and OpenCL performance tests. Each metric entry in the output represents a specific
+  test result with associated metadata.
+
+  Args:
+    geekbench_output (str): Raw output from a Geekbench benchmark as a string.
+
+  Returns:
+    List[Dict]: A list of dictionaries where each dictionary represents a parsed metric
+    sample. Each dictionary has the following structure:
+
+    - "metric_name" (str): The name of the metric, describing the test and
+      performance category. Examples include "Single-Core File Compression" or "Multi-Core Score".
+
+    - "metric_value" (float): The numerical result or score of the specific test. This could
+      be a throughput value, such as MB/sec, or a score in points.
+
+    - "metric_unit" (str): The unit associated with the metric value. For example, units
+      can be "MB/sec" for throughput or "points" for scores.
+
+    - "metric_metadata" (dict): Additional metadata about the test, including:
+      - "category" (str): The performance category, such as "Single-Core", "Multi-Core", or "OpenCL".
+      - "test" (str, optional): The specific test name within the category, such as "File Compression"
+        or "HTML5 Browser". This key is present for detailed test metrics.
+      - "score" (int, optional): The individual test score associated with the metric, where applicable.
+        For instance, if a throughput value is provided, the corresponding score is also included.
+
+  Example Output:
+    [
+      {
+        "metric_name": "Single-Core Score",
+        "metric_value": 1795,
+        "metric_unit": "points",
+        "metric_metadata": {"category": "Single-Core"}
+      },
+      {
+        "metric_name": "Single-Core File Compression",
+        "metric_value": 269.3,
+        "metric_unit": "MB/sec",
+        "metric_metadata": {
+          "category": "Single-Core",
+          "test": "File Compression",
+          "score": 1875
+        }
+      }
+    ]
+  """
+
+  # Initialize a list to store the parsed samples
+  samples = []
+
+  # Track the current category (Single-Core, Multi-Core, or OpenCL)
+  current_category = None
+  current_metric_name = None
+  last_score = None
+
+  # Split the output into lines for easier processing
+  lines = geekbench_output.splitlines()
+
+  for line in lines:
+    line = line.strip()
+    # Detect category headers
+    if "Single-Core Performance" in line:
+      current_category = "Single-Core"
+    elif "Multi-Core Performance" in line:
+      current_category = "Multi-Core"
+    elif "OpenCL Performance" in line:
+      current_category = "OpenCL"
+
+    # Detect overall score lines, ensuring current_category is not None
+    elif "Score" in line and current_category:
+      # Parse overall score based on the current category
+      try:
+        score = int(line.split()[-1])
+        samples.append({
+            "metric_name": f"{current_category} Score",
+            "metric_value": score,
+            "metric_unit": "points",
+            "metric_metadata": {"category": current_category}
+        })
+      except ValueError:
+        # Handle the case where score parsing fails
+        continue
+
+    # Detect specific test names within a category
+    elif line.strip() and line.split()[0].isalpha():
+      current_metric_name = line.strip()
+
+    # Detect score line before throughput, storing score for metadata
+    elif current_metric_name and line.strip().isdigit():
+      last_score = int(line.strip())
+
+    # Detect throughput values with units (e.g., 269.3 MB/sec)
+    elif current_metric_name and line.strip():
+      parts = line.strip().split()
+      try:
+        value = float(parts[0])  # First part is the numeric value
+        unit = ' '.join(parts[1:]) if len(parts) > 1 else 'points'  # Remaining part is the unit
+
+        # Add the parsed data as a sample, including the last_score in metadata
+        samples.append({
+            "metric_name": f"{current_category} {current_metric_name}",
+            "metric_value": value,
+            "metric_unit": unit,
+            "metric_metadata": {
+                "category": current_category,
+                "test": current_metric_name,
+                "score": last_score
+            }
+        })
+
+        # Reset the metric name and score after processing
+        current_metric_name = None
+        last_score = None
+      except ValueError:
+        # Handle cases where conversion to float fails
+        continue
+
+  return samples
diff --git a/tests/geekbench_benchmark_windows_test.py b/tests/geekbench_benchmark_windows_test.py
new file mode 100644
index 000000000..57e23616d
--- /dev/null
+++ b/tests/geekbench_benchmark_windows_test.py
@@ -0,0 +1,77 @@
+import unittest
+import os
+from perfkitbenchmarker.windows_benchmarks.geekbench_benchmark import ParseResults
+
+
+class TestParseResults(unittest.TestCase):
+    '''
+    Unit tests for the ParseResults function, which parses Geekbench benchmark output and
+    converts it into a structured list of performance metrics.
+    '''
+    def setUp(self):
+        """
+        Loads sample Geekbench data from a text file to be used in each test.
+
+        The file 'geekbench_windows.txt' is stored under the 'data/geekbench' directory
+        and contains raw Geekbench benchmark results to be parsed.
+ """ + + # Load content from geekbench_windows.txt file + data_file_path = os.path.join( + os.path.dirname(__file__), + '..', 'perfkitbenchmarker', 'data', 'geekbench', 'geekbench_windows.txt' + ) + + # Load content from the specified file path + with open(data_file_path, "r") as f: + self.sample_output = f.read() + + def test_parse_results(self): + """ + Tests the ParseResults function to ensure it correctly parses specific metrics from + the Geekbench output. + """ + + # Run ParseResults function + samples = ParseResults(self.sample_output) + + # Example checks for a specific metric + single_core_score = next(s for s in samples if s['metric_name'] == "Single-Core Score") + self.assertEqual(single_core_score['metric_value'], 1795) + + single_core_file_compression = next(s for s in samples if s['metric_name'] == "Single-Core File Compression") + self.assertEqual(single_core_file_compression['metric_value'], 269.3) + self.assertEqual(single_core_file_compression['metric_unit'], "MB/sec") + self.assertEqual(single_core_file_compression['metric_metadata']['score'], 1875) + + multi_core_score = next(s for s in samples if s['metric_name'] == "Multi-Core Score") + self.assertEqual(multi_core_score['metric_value'], 6627) + + opencl_score = next(s for s in samples if s['metric_name'] == "OpenCL Score") + self.assertEqual(opencl_score['metric_value'], 88265) + + def test_print_parse_results(self): + """ + Prints all parsed samples for manual verification. + + This method outputs each parsed sample in a structured format to the console, + allowing manual inspection of metric names, values, units, and metadata. + """ + + # Run ParseResults function + samples = ParseResults(self.sample_output) + + # Print all parsed data for manual verification + for sample in samples: + print({ + "metric_name": sample["metric_name"], + "metric_value": sample["metric_value"], + "metric_unit": sample["metric_unit"], + "metric_metadata": sample["metric_metadata"] + }) + + +if __name__ == '__main__': + unittest.main() + + diff --git a/tests/linux_benchmarks/geekbench_benchmark_linux_test.py b/tests/linux_benchmarks/geekbench_benchmark_linux_test.py new file mode 100644 index 000000000..dc932a215 --- /dev/null +++ b/tests/linux_benchmarks/geekbench_benchmark_linux_test.py @@ -0,0 +1,85 @@ +import unittest +import os +from perfkitbenchmarker.linux_benchmarks.geekbench_benchmark import ParseResults + + +class TestParseResultsLinux(unittest.TestCase): + ''' + Unit tests for the ParseResults function, which parses Geekbench benchmark output and + converts it into a structured list of performance metrics. + ''' + + def setUp(self): + """ + Loads sample Geekbench data from a text file to be used in each test. + + The file 'geekbench_linux.txt' is stored under the 'data/geekbench' directory + and contains raw Geekbench benchmark results to be parsed. + """ + + # Load content from the Linux geekbench result file + data_file_path = os.path.join( + os.path.dirname(__file__), + '..','..', 'perfkitbenchmarker', 'data', 'geekbench', 'geekbench_linux.txt' + ) + + # Load content from the specified file path + with open(data_file_path, "r") as f: + self.sample_output = f.read() + + def test_parse_results(self): + """ + Tests the ParseResults function to ensure it correctly parses specific metrics from + the Geekbench output. 
+ """ + # Run ParseResults function + samples = ParseResults(self.sample_output) + + # Example checks for specific metrics + single_core_score = next(s for s in samples if s['metric_name'] == "Single-Core Score") + self.assertEqual(single_core_score['metric_value'], 1803) + + single_core_file_compression = next(s for s in samples if s['metric_name'] == "Single-Core File Compression") + self.assertEqual(single_core_file_compression['metric_value'], 257.5) + self.assertEqual(single_core_file_compression['metric_unit'], "MB/sec") + self.assertEqual(single_core_file_compression['metric_metadata']['score'], 1793) + + multi_core_score = next(s for s in samples if s['metric_name'] == "Multi-Core Score") + self.assertEqual(multi_core_score['metric_value'], 5678) + + multi_core_file_compression = next(s for s in samples if s['metric_name'] == "Multi-Core File Compression") + self.assertEqual(multi_core_file_compression['metric_value'], 595.5) + self.assertEqual(multi_core_file_compression['metric_unit'], "MB/sec") + self.assertEqual(multi_core_file_compression['metric_metadata']['score'], 4147) + + opencl_score = next(s for s in samples if s['metric_name'] == "OpenCL Score") + self.assertEqual(opencl_score['metric_value'], 358153) + + opencl_background_blur = next(s for s in samples if s['metric_name'] == "OpenCL Background Blur") + self.assertEqual(opencl_background_blur['metric_value'], 538.9) + self.assertEqual(opencl_background_blur['metric_unit'], "images/sec") + self.assertEqual(opencl_background_blur['metric_metadata']['score'], 130203) + + def test_print_parse_results(self): + """ + Prints all parsed samples for manual verification. + + This method outputs each parsed sample in a structured format to the console, + allowing manual inspection of metric names, values, units, and metadata. + """ + + # Run ParseResults function + samples = ParseResults(self.sample_output) + + # Print all parsed data for manual verification + for sample in samples: + print({ + "metric_name": sample["metric_name"], + "metric_value": sample["metric_value"], + "metric_unit": sample["metric_unit"], + "metric_metadata": sample["metric_metadata"] + }) + + +if __name__ == '__main__': + unittest.main() From 10bb4e7c4c0020c51e5d93eee7095edd01a8ddfe Mon Sep 17 00:00:00 2001 From: Le Li Date: Fri, 8 Nov 2024 03:13:55 +0000 Subject: [PATCH 04/10] Updated CHANGES.next.md --- CHANGES.next.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.next.md b/CHANGES.next.md index 2943aacfd..77992b771 100644 --- a/CHANGES.next.md +++ b/CHANGES.next.md @@ -206,6 +206,7 @@ `journalctl`, and `sos report` (if supported) logs from Linux test VMs. - Add `--vm_log_bucket` flag, offering users the option to upload the logs captured via the `--capture_vm_logs` flag to a GCS bucket. 
From 10bb4e7c4c0020c51e5d93eee7095edd01a8ddfe Mon Sep 17 00:00:00 2001
From: Le Li
Date: Fri, 8 Nov 2024 03:13:55 +0000
Subject: [PATCH 04/10] Updated CHANGES.next.md

---
 CHANGES.next.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGES.next.md b/CHANGES.next.md
index 2943aacfd..77992b771 100644
--- a/CHANGES.next.md
+++ b/CHANGES.next.md
@@ -206,6 +206,7 @@
   `journalctl`, and `sos report` (if supported) logs from Linux test VMs.
 - Add `--vm_log_bucket` flag, offering users the option to upload the logs
   captured via the `--capture_vm_logs` flag to a GCS bucket.
+- Add Geekbench 6 benchmark.
 
 ### Enhancements:

From 0c87be8c879e714b46748d4276c1d3325c26a403 Mon Sep 17 00:00:00 2001
From: Le Li
Date: Fri, 29 Nov 2024 03:15:12 +0000
Subject: [PATCH 05/10] move geekbench6 sample output txt file into tests folder

---
 {perfkitbenchmarker => tests}/data/geekbench/geekbench_linux.txt | 0
 .../data/geekbench/geekbench_windows.txt                         | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename {perfkitbenchmarker => tests}/data/geekbench/geekbench_linux.txt (100%)
 rename {perfkitbenchmarker => tests}/data/geekbench/geekbench_windows.txt (100%)

diff --git a/perfkitbenchmarker/data/geekbench/geekbench_linux.txt b/tests/data/geekbench/geekbench_linux.txt
similarity index 100%
rename from perfkitbenchmarker/data/geekbench/geekbench_linux.txt
rename to tests/data/geekbench/geekbench_linux.txt
diff --git a/perfkitbenchmarker/data/geekbench/geekbench_windows.txt b/tests/data/geekbench/geekbench_windows.txt
similarity index 100%
rename from perfkitbenchmarker/data/geekbench/geekbench_windows.txt
rename to tests/data/geekbench/geekbench_windows.txt

From 6fbc755b22263a92c6ad93fbdb2bf3032cad0d37 Mon Sep 17 00:00:00 2001
From: Le Li
Date: Fri, 29 Nov 2024 03:15:12 +0000
Subject: [PATCH 06/10] move geekbench6 sample output txt file into tests folder

---
 .../data/geekbench/geekbench_linux.txt                   | 0
 .../data/geekbench/geekbench_windows.txt                 | 0
 tests/geekbench_benchmark_windows_test.py                | 2 +-
 tests/linux_benchmarks/geekbench_benchmark_linux_test.py | 2 +-
 4 files changed, 2 insertions(+), 2 deletions(-)
 rename {perfkitbenchmarker => tests}/data/geekbench/geekbench_linux.txt (100%)
 rename {perfkitbenchmarker => tests}/data/geekbench/geekbench_windows.txt (100%)

diff --git a/perfkitbenchmarker/data/geekbench/geekbench_linux.txt b/tests/data/geekbench/geekbench_linux.txt
similarity index 100%
rename from perfkitbenchmarker/data/geekbench/geekbench_linux.txt
rename to tests/data/geekbench/geekbench_linux.txt
diff --git a/perfkitbenchmarker/data/geekbench/geekbench_windows.txt b/tests/data/geekbench/geekbench_windows.txt
similarity index 100%
rename from perfkitbenchmarker/data/geekbench/geekbench_windows.txt
rename to tests/data/geekbench/geekbench_windows.txt
diff --git a/tests/geekbench_benchmark_windows_test.py b/tests/geekbench_benchmark_windows_test.py
index 57e23616d..4930cb71f 100644
--- a/tests/geekbench_benchmark_windows_test.py
+++ b/tests/geekbench_benchmark_windows_test.py
@@ -19,7 +19,7 @@
         # Load content from geekbench_windows.txt file
         data_file_path = os.path.join(
             os.path.dirname(__file__),
-            '..', 'perfkitbenchmarker', 'data', 'geekbench', 'geekbench_windows.txt'
+            'data', 'geekbench', 'geekbench_windows.txt'
         )
 
         # Load content from the specified file path
diff --git a/tests/linux_benchmarks/geekbench_benchmark_linux_test.py b/tests/linux_benchmarks/geekbench_benchmark_linux_test.py
index dc932a215..cb5bcb32a 100644
--- a/tests/linux_benchmarks/geekbench_benchmark_linux_test.py
+++ b/tests/linux_benchmarks/geekbench_benchmark_linux_test.py
@@ -20,7 +20,7 @@
         # Load content from the Linux geekbench result file
         data_file_path = os.path.join(
             os.path.dirname(__file__),
-            '..','..', 'perfkitbenchmarker', 'data', 'geekbench', 'geekbench_linux.txt'
+            '..', 'data', 'geekbench', 'geekbench_linux.txt'
         )
 
         # Load content from the specified file path

From 8bdb8200c49b46f698224b71332cb8d879d45bfd Mon Sep 17 00:00:00 2001
From: Le Li
Date: Fri, 29 Nov 2024 05:11:43 +0000
Subject: [PATCH 07/10] Set up prepare() and use sample.Sample for structured output

---
 .../windows_benchmarks/geekbench_benchmark.py | 83 ++++++++++---------
 1 file changed, 45 insertions(+), 38 deletions(-)

diff --git a/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py b/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py
index d2d612311..96f2ee253 100644
--- a/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py
+++ b/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py
@@ -4,7 +4,7 @@
 # Define the configuration for the benchmark.
 # This includes VM groups and any flags specific to this benchmark.
 BENCHMARK_CONFIG = """
-geekbench:
+geekbench_benchmark:
   description: >
     Runs Geekbench 6 to evaluate system performance across CPU and GPU on
     Linux or Windows platforms.
@@ -17,6 +17,7 @@
 from perfkitbenchmarker import configs
 from perfkitbenchmarker import sample
 
+
 def GetConfig(user_config):
   """
   Returns the configuration for the benchmark.
@@ -27,7 +28,8 @@
   """
   Sets up the environment on the VM for the benchmark.
   """
-  pass
+  vm = benchmark_spec.vms[0]
+  vm.Install('geekbench')
 
 def Run(benchmark_spec):
   """
@@ -45,49 +47,53 @@
   """
   Parses Geekbench benchmark results to extract metrics for Single-Core, Multi-Core,
   and OpenCL performance tests. Each metric entry in the output represents a specific
-  test result with associated metadata.
+  test result encapsulated in a `sample.Sample` object.
 
   Args:
     geekbench_output (str): Raw output from a Geekbench benchmark as a string.
 
   Returns:
-    List[Dict]: A list of dictionaries where each dictionary represents a parsed metric
-    sample. Each dictionary has the following structure:
-
-    - "metric_name" (str): The name of the metric, describing the test and
-      performance category. Examples include "Single-Core File Compression" or "Multi-Core Score".
-
-    - "metric_value" (float): The numerical result or score of the specific test. This could
-      be a throughput value, such as MB/sec, or a score in points.
-
-    - "metric_unit" (str): The unit associated with the metric value. For example, units
-      can be "MB/sec" for throughput or "points" for scores.
-
-    - "metric_metadata" (dict): Additional metadata about the test, including:
-      - "category" (str): The performance category, such as "Single-Core", "Multi-Core", or "OpenCL".
-      - "test" (str, optional): The specific test name within the category, such as "File Compression"
-        or "HTML5 Browser". This key is present for detailed test metrics.
-      - "score" (int, optional): The individual test score associated with the metric, where applicable.
-        For instance, if a throughput value is provided, the corresponding score is also included.
-
-  Example Output:
-    [
-      {
-        "metric_name": "Single-Core Score",
-        "metric_value": 1795,
-        "metric_unit": "points",
-        "metric_metadata": {"category": "Single-Core"}
-      },
-      {
-        "metric_name": "Single-Core File Compression",
-        "metric_value": 269.3,
-        "metric_unit": "MB/sec",
-        "metric_metadata": {
-          "category": "Single-Core",
-          "test": "File Compression",
-          "score": 1875
-        }
-      }
-    ]
+    List[sample.Sample]: A list of `sample.Sample` objects, where each object represents
+    a parsed metric. Each sample has the following attributes:
+
+    - `metric` (str): The name of the metric, describing the test and
+      performance category. Examples include "Single-Core File Compression" or "Multi-Core Score".
+
+    - `value` (float): The numerical result or score of the specific test. This could
+      be a throughput value, such as MB/sec, or a score in points.
+
+    - `unit` (str): The unit associated with the metric value. For example, units
+      can be "MB/sec" for throughput or "points" for scores.
+
+    - `metadata` (dict): Additional metadata about the test, including:
+      - `category` (str): The performance category, such as "Single-Core", "Multi-Core", or "OpenCL".
+      - `test` (str, optional): The specific test name within the category, such as "File Compression"
+        or "HTML5 Browser". This key is present for detailed test metrics.
+      - `score` (int, optional): The individual test score associated with the metric, where applicable.
+        For instance, if a throughput value is provided, the corresponding score is also included.
+
+    - `timestamp` (float): The Unix timestamp when the sample was created.
+
+  Example Output:
+    [
+      Sample(
+        metric="Single-Core Score",
+        value=1795,
+        unit="points",
+        metadata={"category": "Single-Core"},
+        timestamp=1699815932.123
+      ),
+      Sample(
+        metric="Single-Core File Compression",
+        value=269.3,
+        unit="MB/sec",
+        metadata={
+          "category": "Single-Core",
+          "test": "File Compression",
+          "score": 1875
+        },
+        timestamp=1699815932.123
+      )
+    ]
   """
@@ -118,49 +124,48 @@
     # Detect overall score lines, ensuring current_category is not None
     elif "Score" in line and current_category:
-      # Parse overall score based on the current category
       try:
         score = int(line.split()[-1])
-        samples.append({
-            "metric_name": f"{current_category} Score",
-            "metric_value": score,
-            "metric_unit": "points",
-            "metric_metadata": {"category": current_category}
-        })
+        samples.append(sample.Sample(
+            metric=f"{current_category} Score",
+            value=score,
+            unit="points",
+            metadata={"category": current_category}
+        ))
       except ValueError:
         # Handle the case where score parsing fails
         continue
 
     # Detect specific test names within a category
     elif line.strip() and line.split()[0].isalpha():
       current_metric_name = line.strip()
 
     # Detect score line before throughput, storing score for metadata
     elif current_metric_name and line.strip().isdigit():
       last_score = int(line.strip())
 
     # Detect throughput values with units (e.g., 269.3 MB/sec)
     elif current_metric_name and line.strip():
       parts = line.strip().split()
       try:
         value = float(parts[0])  # First part is the numeric value
         unit = ' '.join(parts[1:]) if len(parts) > 1 else 'points'  # Remaining part is the unit
 
         # Add the parsed data as a sample, including the last_score in metadata
-        samples.append({
-            "metric_name": f"{current_category} {current_metric_name}",
-            "metric_value": value,
-            "metric_unit": unit,
-            "metric_metadata": {
+        samples.append(sample.Sample(
+            metric=f"{current_category} {current_metric_name}",
+            value=value,
+            unit=unit,
+            metadata={
                 "category": current_category,
                 "test": current_metric_name,
                 "score": last_score
             }
-        })
+        ))
 
         # Reset the metric name and score after processing
         current_metric_name = None
         last_score = None
       except ValueError:
         # Handle cases where conversion to float fails
         continue
 
   return samples
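
For readers skimming ahead to the test changes in the next patch: each parsed result is now a sample.Sample record rather than a dict, so fields are read back as attributes. A minimal sketch of the new access pattern follows; the field names are exactly those used in ParseResults above, while the keyword constructor with an auto-populated timestamp is an assumption about PKB's Sample definition.

    from perfkitbenchmarker import sample

    s = sample.Sample(
        metric="Single-Core Score",
        value=1795,
        unit="points",
        metadata={"category": "Single-Core"},
    )
    # Attribute access replaces the old s['metric_name'] / s['metric_value'] lookups.
    assert s.metric == "Single-Core Score"
    assert s.value == 1795
    assert s.metadata["category"] == "Single-Core"
    # asdict() flattens the record to a plain dict for printing or serialization.
    print(s.asdict())
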
From e9a440089621c5973d83eceb5cb9448d985823ba Mon Sep 17 00:00:00 2001
From: Le Li
Date: Fri, 29 Nov 2024 05:15:17 +0000
Subject: [PATCH 08/10] Enhance unit tests to validate the new structured output

---
 tests/geekbench_benchmark_windows_test.py | 41 ++++++++++++-----------
 1 file changed, 22 insertions(+), 19 deletions(-)

diff --git a/tests/geekbench_benchmark_windows_test.py b/tests/geekbench_benchmark_windows_test.py
index 4930cb71f..587a8e6f7 100644
--- a/tests/geekbench_benchmark_windows_test.py
+++ b/tests/geekbench_benchmark_windows_test.py
@@ -36,19 +36,27 @@
         # Run ParseResults function
         samples = ParseResults(self.sample_output)
 
         # Example checks for a specific metric
-        single_core_score = next(s for s in samples if s['metric_name'] == "Single-Core Score")
-        self.assertEqual(single_core_score['metric_value'], 1795)
-
-        single_core_file_compression = next(s for s in samples if s['metric_name'] == "Single-Core File Compression")
-        self.assertEqual(single_core_file_compression['metric_value'], 269.3)
-        self.assertEqual(single_core_file_compression['metric_unit'], "MB/sec")
-        self.assertEqual(single_core_file_compression['metric_metadata']['score'], 1875)
-
-        multi_core_score = next(s for s in samples if s['metric_name'] == "Multi-Core Score")
-        self.assertEqual(multi_core_score['metric_value'], 6627)
-
-        opencl_score = next(s for s in samples if s['metric_name'] == "OpenCL Score")
-        self.assertEqual(opencl_score['metric_value'], 88265)
+        single_core_score = next(s for s in samples if s.metric == "Single-Core Score")
+        self.assertEqual(single_core_score.value, 1795)
+        self.assertEqual(single_core_score.unit, "points")
+        self.assertEqual(single_core_score.metadata["category"], "Single-Core")
+
+        single_core_file_compression = next(s for s in samples if s.metric == "Single-Core File Compression")
+        self.assertEqual(single_core_file_compression.value, 269.3)
+        self.assertEqual(single_core_file_compression.unit, "MB/sec")
+        self.assertEqual(single_core_file_compression.metadata["score"], 1875)
+        self.assertEqual(single_core_file_compression.metadata["category"], "Single-Core")
+        self.assertEqual(single_core_file_compression.metadata["test"], "File Compression")
+
+        multi_core_score = next(s for s in samples if s.metric == "Multi-Core Score")
+        self.assertEqual(multi_core_score.value, 6627)
+        self.assertEqual(multi_core_score.unit, "points")
+        self.assertEqual(multi_core_score.metadata["category"], "Multi-Core")
+
+        opencl_score = next(s for s in samples if s.metric == "OpenCL Score")
+        self.assertEqual(opencl_score.value, 88265)
+        self.assertEqual(opencl_score.unit, "points")
+        self.assertEqual(opencl_score.metadata["category"], "OpenCL")
 
     def test_print_parse_results(self):
         """
@@ -63,12 +71,7 @@
         # Run ParseResults function
         samples = ParseResults(self.sample_output)
 
         # Print all parsed data for manual verification
         for sample in samples:
-            print({
-                "metric_name": sample["metric_name"],
-                "metric_value": sample["metric_value"],
-                "metric_unit": sample["metric_unit"],
-                "metric_metadata": sample["metric_metadata"]
-            })
+            print(sample.asdict())  # Convert the Sample object to a dictionary for easier inspection
 
 
 if __name__ == '__main__':
     unittest.main()
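
Both test modules are plain unittest cases, so they can be exercised locally from the repository root without provisioning any VMs, for example:

    python -m unittest tests.geekbench_benchmark_windows_test -v
    python -m unittest tests.linux_benchmarks.geekbench_benchmark_linux_test -v
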
""" + # TODO: Implement cleanup logic to remove Geekbench and any temporary files created during the benchmark + pass def ParseResults(geekbench_output: str): From 51d47ad5d2228bfa61969a4f5ac9f73155bb3ce8 Mon Sep 17 00:00:00 2001 From: Le Li Date: Fri, 29 Nov 2024 06:03:16 +0000 Subject: [PATCH 10/10] Add logging for failed lines in ParseResults --- .../windows_benchmarks/geekbench_benchmark.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py b/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py index 2dc81b4aa..d6125a874 100644 --- a/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py +++ b/perfkitbenchmarker/windows_benchmarks/geekbench_benchmark.py @@ -16,6 +16,7 @@ # Import necessary modules from PKB from perfkitbenchmarker import configs from perfkitbenchmarker import sample +import logging def GetConfig(user_config): @@ -163,12 +164,13 @@ def ParseResults(geekbench_output: str): } ) ) - # Reset the metric name and score after processing current_metric_name = None last_score = None - except ValueError: - # Handle cases where conversion to float fails + + except ValueError as e: + logging.info(f"Failed to parse line: '{line}'. Error: {e}") continue + return samples