From c6641be94b3c120fd49513fa19c4aeabe273a3f5 Mon Sep 17 00:00:00 2001
From: Pascal Fischer <32096965+pascal-fischer@users.noreply.github.com>
Date: Thu, 28 Nov 2024 19:22:01 +0100
Subject: [PATCH] [tests] Enable benchmark tests on github actions (#2961)

---
 .github/workflows/golang-test-linux.yml |  41 ++++++++
 management/server/account_test.go       | 120 +++++++++++++++---------
 management/server/peer_test.go          |  40 +++++---
 3 files changed, 141 insertions(+), 60 deletions(-)

diff --git a/.github/workflows/golang-test-linux.yml b/.github/workflows/golang-test-linux.yml
index ef66720024d..36dcb791f76 100644
--- a/.github/workflows/golang-test-linux.yml
+++ b/.github/workflows/golang-test-linux.yml
@@ -52,6 +52,47 @@ jobs:
       - name: Test
         run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true go test -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' -timeout 10m -p 1 ./...
 
+  benchmark:
+    strategy:
+      fail-fast: false
+      matrix:
+        arch: [ '386','amd64' ]
+        store: [ 'sqlite', 'postgres' ]
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Install Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: "1.23.x"
+
+
+      - name: Cache Go modules
+        uses: actions/cache@v4
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Install dependencies
+        run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev gcc-multilib libpcap-dev
+
+      - name: Install 32-bit libpcap
+        if: matrix.arch == '386'
+        run: sudo dpkg --add-architecture i386 && sudo apt update && sudo apt-get install -y libpcap0.8-dev:i386
+
+      - name: Install modules
+        run: go mod tidy
+
+      - name: check git status
+        run: git --no-pager diff --exit-code
+
+      - name: Test
+        run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true go test -run=^$ -bench=. -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' -timeout 10m -p 1 ./...
+
   test_client_on_docker:
     runs-on: ubuntu-20.04
     steps:
diff --git a/management/server/account_test.go b/management/server/account_test.go
index dbabfd3663d..4ff81260737 100644
--- a/management/server/account_test.go
+++ b/management/server/account_test.go
@@ -2989,18 +2989,21 @@ func peerShouldReceiveUpdate(t *testing.T, updateMessage <-chan *UpdateMessage)
 
 func BenchmarkSyncAndMarkPeer(b *testing.B) {
 	benchCases := []struct {
-		name       string
-		peers      int
-		groups     int
-		minMsPerOp float64
-		maxMsPerOp float64
+		name   string
+		peers  int
+		groups int
+		// We need different expectations for CI/CD and local runs because of the different performance characteristics
+		minMsPerOpLocal float64
+		maxMsPerOpLocal float64
+		minMsPerOpCICD  float64
+		maxMsPerOpCICD  float64
 	}{
-		{"Small", 50, 5, 1, 3},
-		{"Medium", 500, 100, 7, 13},
-		{"Large", 5000, 200, 65, 80},
-		{"Small single", 50, 10, 1, 3},
-		{"Medium single", 500, 10, 7, 13},
-		{"Large 5", 5000, 15, 65, 80},
+		{"Small", 50, 5, 1, 3, 4, 10},
+		{"Medium", 500, 100, 7, 13, 10, 60},
+		{"Large", 5000, 200, 65, 80, 60, 170},
+		{"Small single", 50, 10, 1, 3, 4, 60},
+		{"Medium single", 500, 10, 7, 13, 10, 26},
+		{"Large 5", 5000, 15, 65, 80, 60, 170},
 	}
 
 	log.SetOutput(io.Discard)
@@ -3033,12 +3036,19 @@
 			msPerOp := float64(duration.Nanoseconds()) / float64(b.N) / 1e6
 			b.ReportMetric(msPerOp, "ms/op")
 
-			if msPerOp < bc.minMsPerOp {
-				b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, bc.minMsPerOp)
+			minExpected := bc.minMsPerOpLocal
+			maxExpected := bc.maxMsPerOpLocal
+			if os.Getenv("CI") == "true" {
+				minExpected = bc.minMsPerOpCICD
+				maxExpected = bc.maxMsPerOpCICD
 			}
 
-			if msPerOp > bc.maxMsPerOp {
-				b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, bc.maxMsPerOp)
+			if msPerOp < minExpected {
+				b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, minExpected)
+			}
+
+			if msPerOp > maxExpected {
+				b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, maxExpected)
 			}
 		})
 	}
@@ -3046,18 +3056,21 @@
 
 func BenchmarkLoginPeer_ExistingPeer(b *testing.B) {
 	benchCases := []struct {
-		name       string
-		peers      int
-		groups     int
-		minMsPerOp float64
-		maxMsPerOp float64
+		name   string
+		peers  int
+		groups int
+		// We need different expectations for CI/CD and local runs because of the different performance characteristics
+		minMsPerOpLocal float64
+		maxMsPerOpLocal float64
+		minMsPerOpCICD  float64
+		maxMsPerOpCICD  float64
 	}{
-		{"Small", 50, 5, 102, 110},
-		{"Medium", 500, 100, 105, 140},
-		{"Large", 5000, 200, 160, 200},
-		{"Small single", 50, 10, 102, 110},
-		{"Medium single", 500, 10, 105, 140},
-		{"Large 5", 5000, 15, 160, 200},
+		{"Small", 50, 5, 102, 110, 102, 120},
+		{"Medium", 500, 100, 105, 140, 105, 170},
+		{"Large", 5000, 200, 160, 200, 160, 270},
+		{"Small single", 50, 10, 102, 110, 102, 120},
+		{"Medium single", 500, 10, 105, 140, 105, 170},
+		{"Large 5", 5000, 15, 160, 200, 160, 270},
 	}
 
 	log.SetOutput(io.Discard)
@@ -3097,12 +3110,19 @@
 			msPerOp := float64(duration.Nanoseconds()) / float64(b.N) / 1e6
 			b.ReportMetric(msPerOp, "ms/op")
 
-			if msPerOp < bc.minMsPerOp {
-				b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, bc.minMsPerOp)
+			minExpected := bc.minMsPerOpLocal
+			maxExpected := bc.maxMsPerOpLocal
+			if os.Getenv("CI") == "true" {
"true" { + minExpected = bc.minMsPerOpCICD + maxExpected = bc.maxMsPerOpCICD + } + + if msPerOp < minExpected { + b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, minExpected) } - if msPerOp > bc.maxMsPerOp { - b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, bc.maxMsPerOp) + if msPerOp > maxExpected { + b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, maxExpected) } }) } @@ -3110,18 +3130,21 @@ func BenchmarkLoginPeer_ExistingPeer(b *testing.B) { func BenchmarkLoginPeer_NewPeer(b *testing.B) { benchCases := []struct { - name string - peers int - groups int - minMsPerOp float64 - maxMsPerOp float64 + name string + peers int + groups int + // We need different expectations for CI/CD and local runs because of the different performance characteristics + minMsPerOpLocal float64 + maxMsPerOpLocal float64 + minMsPerOpCICD float64 + maxMsPerOpCICD float64 }{ - {"Small", 50, 5, 107, 120}, - {"Medium", 500, 100, 105, 140}, - {"Large", 5000, 200, 180, 220}, - {"Small single", 50, 10, 107, 120}, - {"Medium single", 500, 10, 105, 140}, - {"Large 5", 5000, 15, 180, 220}, + {"Small", 50, 5, 107, 120, 107, 140}, + {"Medium", 500, 100, 105, 140, 105, 170}, + {"Large", 5000, 200, 180, 220, 180, 320}, + {"Small single", 50, 10, 107, 120, 105, 140}, + {"Medium single", 500, 10, 105, 140, 105, 170}, + {"Large 5", 5000, 15, 180, 220, 180, 320}, } log.SetOutput(io.Discard) @@ -3161,12 +3184,19 @@ func BenchmarkLoginPeer_NewPeer(b *testing.B) { msPerOp := float64(duration.Nanoseconds()) / float64(b.N) / 1e6 b.ReportMetric(msPerOp, "ms/op") - if msPerOp < bc.minMsPerOp { - b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, bc.minMsPerOp) + minExpected := bc.minMsPerOpLocal + maxExpected := bc.maxMsPerOpLocal + if os.Getenv("CI") == "true" { + minExpected = bc.minMsPerOpCICD + maxExpected = bc.maxMsPerOpCICD + } + + if msPerOp < minExpected { + b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, minExpected) } - if msPerOp > bc.maxMsPerOp { - b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, bc.maxMsPerOp) + if msPerOp > maxExpected { + b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, maxExpected) } }) } diff --git a/management/server/peer_test.go b/management/server/peer_test.go index e5eaa20d605..b15315f9870 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -835,18 +835,21 @@ func BenchmarkGetPeers(b *testing.B) { } func BenchmarkUpdateAccountPeers(b *testing.B) { benchCases := []struct { - name string - peers int - groups int - minMsPerOp float64 - maxMsPerOp float64 + name string + peers int + groups int + // We need different expectations for CI/CD and local runs because of the different performance characteristics + minMsPerOpLocal float64 + maxMsPerOpLocal float64 + minMsPerOpCICD float64 + maxMsPerOpCICD float64 }{ - {"Small", 50, 5, 90, 120}, - {"Medium", 500, 100, 110, 140}, - {"Large", 5000, 200, 800, 1300}, - {"Small single", 50, 10, 90, 120}, - {"Medium single", 500, 10, 110, 170}, - {"Large 5", 5000, 15, 1300, 1800}, + {"Small", 50, 5, 90, 120, 90, 120}, + {"Medium", 500, 100, 110, 140, 120, 200}, + {"Large", 5000, 200, 800, 1300, 2500, 3600}, + {"Small single", 50, 10, 90, 120, 90, 120}, + {"Medium single", 500, 10, 110, 170, 120, 200}, + {"Large 5", 5000, 15, 1300, 
 	}
 	log.SetOutput(io.Discard)
@@ -885,12 +888,19 @@
 			msPerOp := float64(duration.Nanoseconds()) / float64(b.N) / 1e6
 			b.ReportMetric(msPerOp, "ms/op")
 
-			if msPerOp < bc.minMsPerOp {
-				b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, bc.minMsPerOp)
+			minExpected := bc.minMsPerOpLocal
+			maxExpected := bc.maxMsPerOpLocal
+			if os.Getenv("CI") == "true" {
+				minExpected = bc.minMsPerOpCICD
+				maxExpected = bc.maxMsPerOpCICD
+			}
+
+			if msPerOp < minExpected {
+				b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, minExpected)
 			}
 
-			if msPerOp > bc.maxMsPerOp {
-				b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, bc.maxMsPerOp)
+			if msPerOp > maxExpected {
+				b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, maxExpected)
 			}
 		})
 	}
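
For reference, the CI-aware assertion that this patch repeats in each benchmark reduces to the following sketch, so the same benchmarks can gate both local runs and the slower GitHub Actions runners exercised by the new benchmark job. The helper names below (benchmarkCase, expectedRange, assertMsPerOp) are illustrative assumptions and do not appear in the patch, which inlines this logic per benchmark.

// Sketch only: mirrors the pattern added by the patch; names are illustrative.
package server

import (
	"os"
	"testing"
	"time"
)

// benchmarkCase mirrors the fields the patch adds: separate expected
// ms/op ranges for local runs and for CI/CD runs.
type benchmarkCase struct {
	name            string
	peers           int
	groups          int
	minMsPerOpLocal float64
	maxMsPerOpLocal float64
	minMsPerOpCICD  float64
	maxMsPerOpCICD  float64
}

// expectedRange selects the CI/CD bounds when CI=true is set (the workflow
// exports CI=true and preserves it through sudo), otherwise the local bounds.
func expectedRange(bc benchmarkCase) (minMs, maxMs float64) {
	if os.Getenv("CI") == "true" {
		return bc.minMsPerOpCICD, bc.maxMsPerOpCICD
	}
	return bc.minMsPerOpLocal, bc.maxMsPerOpLocal
}

// assertMsPerOp reports ms/op and fails the benchmark when the measured value
// falls outside the selected range, matching the checks in the patched benchmarks.
func assertMsPerOp(b *testing.B, bc benchmarkCase, duration time.Duration) {
	msPerOp := float64(duration.Nanoseconds()) / float64(b.N) / 1e6
	b.ReportMetric(msPerOp, "ms/op")

	minExpected, maxExpected := expectedRange(bc)
	if msPerOp < minExpected {
		b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, minExpected)
	}
	if msPerOp > maxExpected {
		b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, maxExpected)
	}
}

A benchmark written against this sketch would end with assertMsPerOp(b, bc, time.Since(start)) after the timed loop instead of repeating the threshold block.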