From d86fd655dd8e0d1e0f58134519fb622a2a77ac45 Mon Sep 17 00:00:00 2001 From: Kirill Fomichev Date: Thu, 8 Aug 2024 10:22:30 -0500 Subject: [PATCH] add rbpf --- rbpf/.github/workflows/main.yml | 103 + rbpf/.gitignore | 2 + rbpf/Cargo.lock | 376 ++ rbpf/Cargo.toml | 51 + rbpf/LICENSE-APACHE | 202 + rbpf/LICENSE-MIT | 25 + rbpf/README.md | 96 + rbpf/benches/elf_loader.rs | 49 + rbpf/benches/jit_compile.rs | 53 + rbpf/benches/memory_mapping.rs | 344 ++ rbpf/benches/vm_execution.rs | 270 ++ rbpf/cli/Cargo.lock | 516 +++ rbpf/cli/Cargo.toml | 14 + rbpf/cli/src/main.rs | 240 + rbpf/clippy.toml | 1 + rbpf/examples/disassemble.rs | 45 + rbpf/examples/to_json.rs | 88 + rbpf/fuzz/.gitignore | 3 + rbpf/fuzz/Cargo.lock | 441 ++ rbpf/fuzz/Cargo.toml | 57 + rbpf/fuzz/fuzz_targets/common.rs | 67 + rbpf/fuzz/fuzz_targets/dumb.rs | 61 + rbpf/fuzz/fuzz_targets/grammar_aware.rs | 244 + rbpf/fuzz/fuzz_targets/semantic_aware.rs | 297 ++ rbpf/fuzz/fuzz_targets/smart.rs | 71 + rbpf/fuzz/fuzz_targets/smart_jit_diff.rs | 112 + rbpf/fuzz/fuzz_targets/smarter_jit_diff.rs | 121 + .../fuzz_targets/verify_semantic_aware.rs | 34 + rbpf/misc/rbpf.ico | Bin 0 -> 1150 bytes rbpf/misc/rbpf.png | Bin 0 -> 29765 bytes rbpf/misc/rbpf_256.png | Bin 0 -> 13059 bytes rbpf/scripts/cargo-for-all-lock-files.sh | 55 + rbpf/scripts/increment-cargo-version.sh | 155 + rbpf/scripts/read-cargo-variable.sh | 14 + rbpf/scripts/semver.sh | 130 + rbpf/src/aarch64.rs | 1015 +++++ rbpf/src/aligned_memory.rs | 296 ++ rbpf/src/asm_parser.rs | 669 +++ rbpf/src/assembler.rs | 446 ++ rbpf/src/debugger.rs | 549 +++ rbpf/src/disassembler.rs | 279 ++ rbpf/src/ebpf.rs | 620 +++ rbpf/src/elf.rs | 1976 +++++++++ rbpf/src/elf_parser/consts.rs | 168 + rbpf/src/elf_parser/mod.rs | 586 +++ rbpf/src/elf_parser/types.rs | 92 + rbpf/src/elf_parser_glue.rs | 566 +++ rbpf/src/error.rs | 195 + rbpf/src/fuzz.rs | 27 + rbpf/src/insn_builder.rs | 2182 +++++++++ rbpf/src/interpreter.rs | 535 +++ rbpf/src/jit.rs | 1730 ++++++++ rbpf/src/lib.rs | 86 + rbpf/src/memory_management.rs | 167 + rbpf/src/memory_region.rs | 1822 ++++++++ rbpf/src/program.rs | 379 ++ rbpf/src/static_analysis.rs | 1173 +++++ rbpf/src/syscalls.rs | 195 + rbpf/src/verifier.rs | 396 ++ rbpf/src/vm.rs | 439 ++ rbpf/src/x86.rs | 712 +++ rbpf/test_utils/Cargo.lock | 266 ++ rbpf/test_utils/Cargo.toml | 10 + rbpf/test_utils/src/lib.rs | 225 + rbpf/tests/assembler.rs | 560 +++ rbpf/tests/disassembler.rs | 341 ++ rbpf/tests/elfs/bss_section.rs | 7 + rbpf/tests/elfs/bss_section.so | Bin 0 -> 5424 bytes rbpf/tests/elfs/data_section.rs | 7 + rbpf/tests/elfs/data_section.so | Bin 0 -> 5432 bytes rbpf/tests/elfs/elf.ld | 26 + rbpf/tests/elfs/elfs.sh | 59 + rbpf/tests/elfs/long_section_name.so | Bin 0 -> 36352 bytes rbpf/tests/elfs/program_headers_overflow.ld | 24 + rbpf/tests/elfs/program_headers_overflow.so | Bin 0 -> 5408 bytes rbpf/tests/elfs/relative_call.rs | 20 + rbpf/tests/elfs/relative_call.so | Bin 0 -> 5384 bytes rbpf/tests/elfs/reloc_64_64.rs | 4 + rbpf/tests/elfs/reloc_64_64.so | Bin 0 -> 5248 bytes rbpf/tests/elfs/reloc_64_64_sbpfv1.so | Bin 0 -> 1440 bytes rbpf/tests/elfs/reloc_64_relative.rs | 4 + rbpf/tests/elfs/reloc_64_relative.so | Bin 0 -> 5424 bytes rbpf/tests/elfs/reloc_64_relative_data.c | 13 + rbpf/tests/elfs/reloc_64_relative_data.so | Bin 0 -> 5784 bytes .../elfs/reloc_64_relative_data_sbpfv1.so | Bin 0 -> 1920 bytes rbpf/tests/elfs/reloc_64_relative_sbpfv1.so | Bin 0 -> 1616 bytes rbpf/tests/elfs/rodata_section.rs | 8 + rbpf/tests/elfs/rodata_section.so | Bin 0 -> 5424 bytes 
rbpf/tests/elfs/rodata_section_sbpfv1.so | Bin 0 -> 1616 bytes rbpf/tests/elfs/struct_func_pointer.rs | 16 + rbpf/tests/elfs/struct_func_pointer.so | Bin 0 -> 5112 bytes rbpf/tests/elfs/syscall_reloc_64_32.rs | 11 + rbpf/tests/elfs/syscall_reloc_64_32.so | Bin 0 -> 1632 bytes rbpf/tests/elfs/syscall_static.rs | 7 + rbpf/tests/elfs/syscall_static.so | Bin 0 -> 5368 bytes rbpf/tests/elfs/syscalls.rs | 72 + rbpf/tests/execution.rs | 3911 +++++++++++++++++ rbpf/tests/verifier.rs | 384 ++ 98 files changed, 27612 insertions(+) create mode 100644 rbpf/.github/workflows/main.yml create mode 100644 rbpf/.gitignore create mode 100644 rbpf/Cargo.lock create mode 100644 rbpf/Cargo.toml create mode 100644 rbpf/LICENSE-APACHE create mode 100644 rbpf/LICENSE-MIT create mode 100644 rbpf/README.md create mode 100644 rbpf/benches/elf_loader.rs create mode 100644 rbpf/benches/jit_compile.rs create mode 100644 rbpf/benches/memory_mapping.rs create mode 100644 rbpf/benches/vm_execution.rs create mode 100644 rbpf/cli/Cargo.lock create mode 100644 rbpf/cli/Cargo.toml create mode 100644 rbpf/cli/src/main.rs create mode 100644 rbpf/clippy.toml create mode 100644 rbpf/examples/disassemble.rs create mode 100644 rbpf/examples/to_json.rs create mode 100644 rbpf/fuzz/.gitignore create mode 100644 rbpf/fuzz/Cargo.lock create mode 100644 rbpf/fuzz/Cargo.toml create mode 100644 rbpf/fuzz/fuzz_targets/common.rs create mode 100644 rbpf/fuzz/fuzz_targets/dumb.rs create mode 100644 rbpf/fuzz/fuzz_targets/grammar_aware.rs create mode 100644 rbpf/fuzz/fuzz_targets/semantic_aware.rs create mode 100644 rbpf/fuzz/fuzz_targets/smart.rs create mode 100644 rbpf/fuzz/fuzz_targets/smart_jit_diff.rs create mode 100644 rbpf/fuzz/fuzz_targets/smarter_jit_diff.rs create mode 100644 rbpf/fuzz/fuzz_targets/verify_semantic_aware.rs create mode 100644 rbpf/misc/rbpf.ico create mode 100644 rbpf/misc/rbpf.png create mode 100644 rbpf/misc/rbpf_256.png create mode 100755 rbpf/scripts/cargo-for-all-lock-files.sh create mode 100755 rbpf/scripts/increment-cargo-version.sh create mode 100644 rbpf/scripts/read-cargo-variable.sh create mode 100755 rbpf/scripts/semver.sh create mode 100644 rbpf/src/aarch64.rs create mode 100644 rbpf/src/aligned_memory.rs create mode 100644 rbpf/src/asm_parser.rs create mode 100644 rbpf/src/assembler.rs create mode 100644 rbpf/src/debugger.rs create mode 100644 rbpf/src/disassembler.rs create mode 100644 rbpf/src/ebpf.rs create mode 100644 rbpf/src/elf.rs create mode 100644 rbpf/src/elf_parser/consts.rs create mode 100644 rbpf/src/elf_parser/mod.rs create mode 100644 rbpf/src/elf_parser/types.rs create mode 100644 rbpf/src/elf_parser_glue.rs create mode 100644 rbpf/src/error.rs create mode 100644 rbpf/src/fuzz.rs create mode 100644 rbpf/src/insn_builder.rs create mode 100644 rbpf/src/interpreter.rs create mode 100644 rbpf/src/jit.rs create mode 100644 rbpf/src/lib.rs create mode 100644 rbpf/src/memory_management.rs create mode 100644 rbpf/src/memory_region.rs create mode 100644 rbpf/src/program.rs create mode 100644 rbpf/src/static_analysis.rs create mode 100644 rbpf/src/syscalls.rs create mode 100644 rbpf/src/verifier.rs create mode 100644 rbpf/src/vm.rs create mode 100644 rbpf/src/x86.rs create mode 100644 rbpf/test_utils/Cargo.lock create mode 100644 rbpf/test_utils/Cargo.toml create mode 100644 rbpf/test_utils/src/lib.rs create mode 100644 rbpf/tests/assembler.rs create mode 100644 rbpf/tests/disassembler.rs create mode 100644 rbpf/tests/elfs/bss_section.rs create mode 100755 rbpf/tests/elfs/bss_section.so create mode 
100644 rbpf/tests/elfs/data_section.rs create mode 100755 rbpf/tests/elfs/data_section.so create mode 100644 rbpf/tests/elfs/elf.ld create mode 100755 rbpf/tests/elfs/elfs.sh create mode 100755 rbpf/tests/elfs/long_section_name.so create mode 100644 rbpf/tests/elfs/program_headers_overflow.ld create mode 100755 rbpf/tests/elfs/program_headers_overflow.so create mode 100644 rbpf/tests/elfs/relative_call.rs create mode 100755 rbpf/tests/elfs/relative_call.so create mode 100644 rbpf/tests/elfs/reloc_64_64.rs create mode 100755 rbpf/tests/elfs/reloc_64_64.so create mode 100755 rbpf/tests/elfs/reloc_64_64_sbpfv1.so create mode 100644 rbpf/tests/elfs/reloc_64_relative.rs create mode 100755 rbpf/tests/elfs/reloc_64_relative.so create mode 100644 rbpf/tests/elfs/reloc_64_relative_data.c create mode 100755 rbpf/tests/elfs/reloc_64_relative_data.so create mode 100755 rbpf/tests/elfs/reloc_64_relative_data_sbpfv1.so create mode 100755 rbpf/tests/elfs/reloc_64_relative_sbpfv1.so create mode 100644 rbpf/tests/elfs/rodata_section.rs create mode 100755 rbpf/tests/elfs/rodata_section.so create mode 100755 rbpf/tests/elfs/rodata_section_sbpfv1.so create mode 100644 rbpf/tests/elfs/struct_func_pointer.rs create mode 100755 rbpf/tests/elfs/struct_func_pointer.so create mode 100644 rbpf/tests/elfs/syscall_reloc_64_32.rs create mode 100755 rbpf/tests/elfs/syscall_reloc_64_32.so create mode 100644 rbpf/tests/elfs/syscall_static.rs create mode 100755 rbpf/tests/elfs/syscall_static.so create mode 100644 rbpf/tests/elfs/syscalls.rs create mode 100644 rbpf/tests/execution.rs create mode 100644 rbpf/tests/verifier.rs diff --git a/rbpf/.github/workflows/main.yml b/rbpf/.github/workflows/main.yml new file mode 100644 index 00000000000000..f098da1a35082f --- /dev/null +++ b/rbpf/.github/workflows/main.yml @@ -0,0 +1,103 @@ +name: rbpf + +on: + push: + branches: + - main + tags: + - v*.*.* + pull_request: + branches: + - main + +jobs: + test: + name: Test + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [macos-latest, ubuntu-latest, windows-latest] + rust: [stable, beta, nightly] + steps: + - uses: actions/checkout@v1 + - name: Setup Rust (rustup) + run: | + rustup update ${{ matrix.rust }} --no-self-update + rustup default ${{ matrix.rust }} + rustup component add clippy + rustup component add rustfmt + shell: bash + - name: Lint + run: | + cargo fmt --all -- --check + cargo clippy --all --tests -- --deny=warnings + if: matrix.rust == 'nightly' + shell: bash + - name: Build and test + run: | + export RUSTFLAGS="-D warnings" + cargo build --verbose + cargo test --verbose + shell: bash + - name: CLI - Lint + run: | + cargo fmt --all --manifest-path cli/Cargo.toml -- --check + cargo clippy --all --tests --manifest-path cli/Cargo.toml -- --deny=warnings + if: matrix.rust == 'nightly' + shell: bash + - name: CLI - Build and test + run: | + export RUSTFLAGS="-D warnings" + cargo build --manifest-path cli/Cargo.toml --verbose + cargo test --manifest-path cli/Cargo.toml --verbose + shell: bash + - name: Check fuzz + run: | + export RUSTFLAGS="-D warnings" + cargo install cargo-fuzz + cargo fuzz build + if: matrix.rust == 'nightly' && matrix.os != 'windows-latest' + shell: bash + - name: Benchmark + run: RUSTFLAGS="-D warnings" cargo bench -- --nocapture + if: matrix.rust == 'nightly' && matrix.os != 'windows-latest' + shell: bash + + coverage: + name: Coverage + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Setup Rust (rustup) + run: | + rustup update nightly --no-self-update + rustup default 
nightly + rustup component add llvm-tools-preview + shell: bash + - name: Install cargo-llvm-cov + run: cargo install cargo-llvm-cov + shell: bash + - name: Generate test coverage + run: | + cargo llvm-cov --lcov --output-path coverage.info + shell: bash + - name: Upload test coverage + run: bash <(curl -s https://codecov.io/bash) -f coverage.info || echo "Codecov did not collect coverage reports" + shell: bash + + release: + name: Release + needs: test + runs-on: ubuntu-latest + if: ${{ startsWith(github.ref, 'refs/tags/v') }} + steps: + - uses: actions/checkout@v1 + - name: Doc and package + run: | + cargo doc + cargo package + shell: bash + - name: Publish + env: + CRATES_IO_TOKEN: ${{ secrets.CRATES_IO_TOKEN }} + run: cargo publish --token "$CRATES_IO_TOKEN" diff --git a/rbpf/.gitignore b/rbpf/.gitignore new file mode 100644 index 00000000000000..c5078494edbc75 --- /dev/null +++ b/rbpf/.gitignore @@ -0,0 +1,2 @@ +target +.idea diff --git a/rbpf/Cargo.lock b/rbpf/Cargo.lock new file mode 100644 index 00000000000000..067325a3491f0d --- /dev/null +++ b/rbpf/Cargo.lock @@ -0,0 +1,376 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "arbitrary" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c38b6b6b79f671c25e1a3e785b7b82d7562ffc9cd3efdc98627e5668a2472490" +dependencies = [ + "derive_arbitrary", +] + +[[package]] +name = "ascii" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e" + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "byteorder" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "combine" +version = "3.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da3da6baa321ec19e1cc41d31bf599f00c783d0517095cdaf0332e3fe8d20680" +dependencies = [ + "ascii", + "byteorder 1.4.3", + "either", + "memchr", + "unreachable", +] + +[[package]] +name = "derive_arbitrary" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98e23c06c035dac87bd802d98f368df73a7f2cb05a66ffbd1f377e821fac4af9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "elf" +version = "0.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4841de15dbe0e49b9b62a417589299e3be0d557e0900d36acb87e6dae47197f5" +dependencies = [ + "byteorder 0.5.3", +] + +[[package]] +name = "gdbstub" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32c95766e0414f8bfc1d07055574c621b67739466d6ba516c4fef8e99d30d2e6" +dependencies = [ + "bitflags", + "cfg-if", + "log", + "managed", + "num-traits", + "paste", +] + +[[package]] +name = "getrandom" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "goblin" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c955ab4e0ad8c843ea653a3d143048b87490d9be56bd7132a435c2407846ac8f" +dependencies = [ + "log", + "plain", + "scroll", +] + +[[package]] +name = "hash32" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +dependencies = [ + "byteorder 1.4.3", +] + +[[package]] +name = "json" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" + +[[package]] +name = "libc" +version = "0.2.122" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec647867e2bf0772e28c8bcde4f0d19a9216916e890543b5a03ed8ef27b8f259" + +[[package]] +name = "log" +version = "0.4.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "managed" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca88d725a0a943b096803bd34e73a4437208b6077654cc4ecb2947a5f91618d" + +[[package]] +name = "memchr" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + +[[package]] +name = "paste" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" + +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + +[[package]] +name = "ppv-lite86" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" + +[[package]] +name = "proc-macro2" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "632d02bff7f874a36f33ea8bb416cd484b90cc66c1194b1a1110d067a7013f58" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" + +[[package]] +name = "scroll" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" +dependencies = [ + "scroll_derive", +] + +[[package]] +name = "scroll_derive" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "solana_rbpf" +version = "0.8.0" +dependencies = [ + "arbitrary", + "byteorder 1.4.3", + "combine", + "elf", + "gdbstub", + "goblin", + "hash32", + "json", + "libc", + "log", + "rand", + "rustc-demangle", + "scroll", + "test_utils", + "thiserror", + "winapi", +] + +[[package]] +name = "syn" +version = "1.0.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "test_utils" +version = "0.8.0" +dependencies = [ + "libc", + "solana_rbpf", +] + +[[package]] +name = "thiserror" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" + +[[package]] +name = "unreachable" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" +dependencies = [ + "void", +] + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + +[[package]] +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/rbpf/Cargo.toml b/rbpf/Cargo.toml new file mode 100644 index 00000000000000..fa841b4a63a181 --- /dev/null +++ b/rbpf/Cargo.toml @@ -0,0 +1,51 @@ +[workspace] + +[package] +name = "solana_rbpf" +version = "0.8.0+0" +description = "Virtual machine and JIT compiler for eBPF programs" +authors = ["Solana Maintainers "] +repository = "https://github.com/solana-labs/rbpf" +homepage = "https://solana.com/" +keywords = ["BPF", "eBPF", "interpreter", "JIT", "filtering"] +license = "Apache-2.0" +edition = "2018" + +include = [ + "src/**", + "examples/**", + "tests/**", + "benches/**", + "LICENSE*", + "Cargo.toml", +] + +[dependencies] +arbitrary = { version = "1.0", optional = true, features = ["derive"] } +byteorder = "1.2" +combine = "3.8.1" +gdbstub = { version = "0.6.2", optional = true } +goblin = "0.5.1" +hash32 = "0.2.0" +log = "0.4.2" +rand = { version = "0.8.5", features = ["small_rng"]} +rustc-demangle = "0.1" +scroll = "0.11" +thiserror = "1.0.26" + +[target.'cfg(windows)'.dependencies] +winapi = { version = "0.3", features = ["memoryapi", "sysinfoapi", "winnt", "errhandlingapi"], optional = true } + +[target.'cfg(not(windows))'.dependencies] +libc = { version = "0.2", optional = true } + +[features] +default = ["jit"] +jit = ["libc", "winapi"] +fuzzer-not-safe-for-production = ["arbitrary"] +debugger = ["gdbstub"] + +[dev-dependencies] +elf = "0.0.10" +json = "0.12" +test_utils = { path = "test_utils/" } diff --git a/rbpf/LICENSE-APACHE b/rbpf/LICENSE-APACHE new file mode 100644 index 00000000000000..d645695673349e --- /dev/null +++ b/rbpf/LICENSE-APACHE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/rbpf/LICENSE-MIT b/rbpf/LICENSE-MIT new file mode 100644 index 00000000000000..661a705a284cd1 --- /dev/null +++ b/rbpf/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 6WIND S.A. + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rbpf/README.md b/rbpf/README.md
new file mode 100644
index 00000000000000..1d46856c40b860
--- /dev/null
+++ b/rbpf/README.md
@@ -0,0 +1,96 @@
+# solana_rbpf
+
+![](misc/rbpf_256.png)
+
+Rust (user-space) virtual machine for eBPF
+
+[![Build Status](https://github.com/solana-labs/rbpf/actions/workflows/main.yml/badge.svg)](https://github.com/solana-labs/rbpf/actions/workflows/main.yml)
+[![Crates.io](https://img.shields.io/crates/v/solana_rbpf.svg)](https://crates.io/crates/solana_rbpf)
+
+## Description
+
+This is a fork of [RBPF](https://github.com/qmonnet/rbpf) by Quentin Monnet.
+
+This crate contains a virtual machine for eBPF program execution. BPF, as in
+_Berkeley Packet Filter_, is an assembly-like language initially developed for
+BSD systems, in order to filter packets in the kernel with tools such as
+tcpdump, so as to avoid useless copies to user-space. It was ported to Linux,
+where it evolved into eBPF (_extended_ BPF), a faster version with more
+features. While BPF programs were originally intended to run in the kernel,
+the virtual machine of this crate enables running them in user-space
+applications; it contains an interpreter and an x86_64 JIT-compiler for eBPF
+programs, as well as an assembler, a disassembler and a verifier.
+
+The crate is supposed to compile and run on Linux, macOS, and Windows,
+although the JIT-compiler does not work on Windows at this time.
+
+## Link to the crate
+
+This crate is available from [crates.io](https://crates.io/crates/solana_rbpf),
+so it should work out of the box by adding it as a dependency in your
+`Cargo.toml` file:
+
+```toml
+[dependencies]
+solana_rbpf = "0.8.0"
+```
+
+You can also use the development version from this GitHub repository. This
+should be as simple as putting this inside your `Cargo.toml`:
+
+```toml
+[dependencies]
+solana_rbpf = { git = "https://github.com/solana-labs/rbpf", branch = "main" }
+```
+
+Of course, if you prefer, you can clone it locally, possibly hack the crate,
+and then indicate the path of your local version in `Cargo.toml`:
+
+```toml
+[dependencies]
+solana_rbpf = { path = "path/to/solana_rbpf" }
+```
+
+Then indicate in your source code that you want to use the crate:
+
+```rust,ignore
+extern crate solana_rbpf;
+```
+
+## API
+
+The API is pretty well documented inside the source code. You should also be
+able to access [an online version of the documentation from
+here](https://docs.rs/solana_rbpf/), automatically generated from the
+[crates.io](https://crates.io/crates/solana_rbpf)
+version (which may not be up-to-date with the main branch).
+[Examples](examples), [unit tests](tests) and [performance benchmarks](benches)
+should also prove helpful.
+
+Here are the steps to follow to run an eBPF program with rbpf:
+
+1. Create the config and a loader built-in program, and add some functions.
+2. Create an executable, either from the bytecode or an ELF.
+3. If you want a JIT-compiled program, compile it.
+4. Create a memory mapping, consisting of multiple memory regions.
+5. Create a context object, which will also act as the instruction meter.
+6. Create a virtual machine using all of the previous steps.
+7. Execute your program: either run the interpreter or call the JIT-compiled
+   function.
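+
+A minimal sketch of these steps, pieced together from the benchmarks in this
+repository (so treat it as illustrative rather than canonical): it assembles a
+trivial program and runs it in the interpreter, and it borrows the
+`create_vm!` helper from the `test_utils` crate (a dev-dependency of this
+repository) to set up the stack, the heap and the memory mapping; in your own
+application you would construct those directly.
+
+```rust,ignore
+use solana_rbpf::{
+    assembler::assemble,
+    program::{BuiltinProgram, FunctionRegistry},
+    verifier::RequisiteVerifier,
+    vm::{Config, TestContextObject},
+};
+use std::sync::Arc;
+use test_utils::create_vm;
+
+fn main() {
+    // 1. Create the config and a loader built-in program
+    //    (no extra functions are registered in this sketch).
+    let loader = Arc::new(BuiltinProgram::new_loader(
+        Config::default(),
+        FunctionRegistry::default(),
+    ));
+    // 2. Create an executable, here from assembly text
+    //    (`Executable::from_elf` loads an ELF instead), and verify it.
+    let mut executable =
+        assemble::<TestContextObject>("mov r0, 0\nexit", loader).unwrap();
+    executable.verify::<RequisiteVerifier>().unwrap();
+    // 3. Optionally JIT-compile the program (not available on Windows).
+    #[cfg(not(windows))]
+    executable.jit_compile().unwrap();
+    // 4.-6. Create the memory mapping (empty input region list here), the
+    // context object and the virtual machine itself.
+    let mut context_object = TestContextObject::default();
+    create_vm!(
+        vm,
+        &executable,
+        &mut context_object,
+        stack,
+        heap,
+        Vec::new(),
+        None
+    );
+    // The context object doubles as the instruction meter; grant a budget.
+    vm.context_object_pointer.remaining = 1_000;
+    // 7. Execute: `true` selects the interpreter, `false` the JIT-compiled code.
+    let (_instruction_count, result) = vm.execute_program(&executable, true);
+    assert!(result.is_ok(), "{:?}", result);
+}
+```
+
+In a real host you would also register syscalls on the loader's function
+registry and pass input memory regions instead of `Vec::new()`; the
+[benchmarks](benches) show both.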
+
+## Developer
+
+### Dependencies
+- rustc version 1.72 or higher
+
+### Build and test instructions
+- To build, run `cargo build`
+- To test, run `cargo test`
+
+## License
+
+Following the example of the Rust language project itself, and in order to
+ease integration with other projects, the rbpf crate is distributed under the
+terms of both the MIT license and the Apache License (Version 2.0).
+
+See [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT) for details.
diff --git a/rbpf/benches/elf_loader.rs b/rbpf/benches/elf_loader.rs
new file mode 100644
index 00000000000000..ea6214001175f1
--- /dev/null
+++ b/rbpf/benches/elf_loader.rs
@@ -0,0 +1,49 @@
+// Copyright 2020 Solana Maintainers <maintainers@solana.foundation>
+//
+// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
+// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+#![feature(test)]
+
+extern crate solana_rbpf;
+extern crate test;
+extern crate test_utils;
+
+use solana_rbpf::{
+    elf::Executable,
+    program::{BuiltinFunction, BuiltinProgram, FunctionRegistry},
+    syscalls,
+    vm::{Config, TestContextObject},
+};
+use std::{fs::File, io::Read, sync::Arc};
+use test::Bencher;
+
+fn loader() -> Arc<BuiltinProgram<TestContextObject>> {
+    let mut function_registry = FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
+    function_registry
+        .register_function_hashed(*b"log", syscalls::SyscallString::vm)
+        .unwrap();
+    Arc::new(BuiltinProgram::new_loader(
+        Config::default(),
+        function_registry,
+    ))
+}
+
+#[bench]
+fn bench_load_sbpfv1(bencher: &mut Bencher) {
+    let mut file = File::open("tests/elfs/syscall_reloc_64_32.so").unwrap();
+    let mut elf = Vec::new();
+    file.read_to_end(&mut elf).unwrap();
+    let loader = loader();
+    bencher.iter(|| Executable::<TestContextObject>::from_elf(&elf, loader.clone()).unwrap());
+}
+
+#[bench]
+fn bench_load_sbpfv2(bencher: &mut Bencher) {
+    let mut file = File::open("tests/elfs/syscall_static.so").unwrap();
+    let mut elf = Vec::new();
+    file.read_to_end(&mut elf).unwrap();
+    let loader = loader();
+    bencher.iter(|| Executable::<TestContextObject>::from_elf(&elf, loader.clone()).unwrap());
+}
diff --git a/rbpf/benches/jit_compile.rs b/rbpf/benches/jit_compile.rs
new file mode 100644
index 00000000000000..c232bc9a2c6984
--- /dev/null
+++ b/rbpf/benches/jit_compile.rs
@@ -0,0 +1,53 @@
+// Copyright 2020 Solana Maintainers <maintainers@solana.foundation>
+//
+// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
+// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+#![feature(test)]
+
+extern crate solana_rbpf;
+extern crate test;
+
+use solana_rbpf::{
+    elf::Executable, program::BuiltinProgram, verifier::RequisiteVerifier, vm::TestContextObject,
+};
+use std::{fs::File, io::Read, sync::Arc};
+use test::Bencher;
+use test_utils::create_vm;
+
+#[bench]
+fn bench_init_vm(bencher: &mut Bencher) {
+    let mut file = File::open("tests/elfs/relative_call.so").unwrap();
+    let mut elf = Vec::new();
+    file.read_to_end(&mut elf).unwrap();
+    let executable =
+        Executable::<TestContextObject>::from_elf(&elf, Arc::new(BuiltinProgram::new_mock()))
+            .unwrap();
+    executable.verify::<RequisiteVerifier>().unwrap();
+    bencher.iter(|| {
+        let mut context_object = TestContextObject::default();
+        create_vm!(
+            _vm,
+            &executable,
+            &mut context_object,
+            stack,
+            heap,
+            Vec::new(),
+            None
+        );
+    });
+}
+
+#[cfg(not(windows))]
+#[bench]
+fn bench_jit_compile(bencher: &mut Bencher) {
+    let mut file = File::open("tests/elfs/relative_call.so").unwrap();
+    let mut elf = Vec::new();
+    file.read_to_end(&mut elf).unwrap();
+    let mut executable =
+        Executable::<TestContextObject>::from_elf(&elf, Arc::new(BuiltinProgram::new_mock()))
+            .unwrap();
+    executable.verify::<RequisiteVerifier>().unwrap();
+    bencher.iter(|| executable.jit_compile().unwrap());
+}
diff --git a/rbpf/benches/memory_mapping.rs b/rbpf/benches/memory_mapping.rs
new file mode 100644
index 00000000000000..a6966db4b99e5a
--- /dev/null
+++ b/rbpf/benches/memory_mapping.rs
@@ -0,0 +1,344 @@
+// Copyright 2020 Solana Maintainers <maintainers@solana.foundation>
+//
+// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
+// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+#![feature(test)]
+
+extern crate rand;
+extern crate solana_rbpf;
+extern crate test;
+
+use rand::{rngs::SmallRng, Rng, SeedableRng};
+use solana_rbpf::{
+    memory_region::{
+        AccessType, AlignedMemoryMapping, MemoryRegion, MemoryState, UnalignedMemoryMapping,
+    },
+    program::SBPFVersion,
+    vm::Config,
+};
+use test::Bencher;
+
+fn generate_memory_regions(
+    entries: usize,
+    state: MemoryState,
+    mut prng: Option<&mut SmallRng>,
+) -> (Vec<MemoryRegion>, u64) {
+    let mut memory_regions = Vec::with_capacity(entries);
+    let mut offset = 0x100000000;
+    for _ in 0..entries {
+        let length = match &mut prng {
+            Some(prng) => (*prng).gen::<u8>() as u64 + 4,
+            None => 4,
+        };
+        let content = vec![0; length as usize];
+        memory_regions.push(MemoryRegion::new_for_testing(
+            &content[..],
+            offset,
+            0,
+            state,
+        ));
+        offset += 0x100000000;
+    }
+    (memory_regions, offset)
+}
+
+macro_rules! new_prng {
+    ( ) => {
+        SmallRng::from_seed([0; 32])
+    };
+}
+
+#[bench]
+fn bench_prng(bencher: &mut Bencher) {
+    let mut prng = new_prng!();
+    bencher.iter(|| prng.gen::<u64>());
+}
+
+macro_rules! bench_gapped_randomized_access_with_1024_entries {
+    (do_bench, $name:ident, $mem:tt) => {
+        #[bench]
+        fn $name(bencher: &mut Bencher) {
+            let frame_size: u64 = 2;
+            let frame_count: u64 = 1024;
+            let content = vec![0; (frame_size * frame_count * 2) as usize];
+            bencher
+                .bench(|bencher| {
+                    let memory_regions = vec![MemoryRegion::new_for_testing(
+                        &content[..],
+                        0x100000000,
+                        frame_size,
+                        MemoryState::Readable,
+                    )];
+                    let config = Config::default();
+                    let memory_mapping =
+                        $mem::new(memory_regions, &config, &SBPFVersion::V2).unwrap();
+                    let mut prng = new_prng!();
+                    bencher.iter(|| {
+                        assert!(memory_mapping
+                            .map(
+                                AccessType::Load,
+                                0x100000000 + (prng.gen::<u64>() % frame_count * (frame_size * 2)),
+                                1,
+                            )
+                            .is_ok());
+                    });
+                    Ok(())
+                })
+                .unwrap();
+        }
+    };
+    () => {
+        bench_gapped_randomized_access_with_1024_entries!(
+            do_bench,
+            bench_gapped_randomized_access_with_1024_entries_aligned,
+            AlignedMemoryMapping
+        );
+        bench_gapped_randomized_access_with_1024_entries!(
+            do_bench,
+            bench_gapped_randomized_access_with_1024_entries_unaligned,
+            UnalignedMemoryMapping
+        );
+    };
+}
+bench_gapped_randomized_access_with_1024_entries!();
+
+macro_rules! bench_randomized_access_with_0001_entry {
+    (do_bench, $name:ident, $mem:tt) => {
+        #[bench]
+        fn $name(bencher: &mut Bencher) {
+            let content = vec![0; 1024 * 2];
+            let memory_regions = vec![MemoryRegion::new_readonly(&content[..], 0x100000000)];
+            let config = Config::default();
+            let memory_mapping = $mem::new(memory_regions, &config, &SBPFVersion::V2).unwrap();
+            let mut prng = new_prng!();
+            bencher.iter(|| {
+                let _ = memory_mapping.map(
+                    AccessType::Load,
+                    0x100000000 + (prng.gen::<u64>() % content.len() as u64),
+                    1,
+                );
+            });
+        }
+    };
+    () => {
+        bench_randomized_access_with_0001_entry!(
+            do_bench,
+            bench_randomized_access_with_0001_entry_aligned,
+            AlignedMemoryMapping
+        );
+        bench_randomized_access_with_0001_entry!(
+            do_bench,
+            bench_randomized_access_with_0001_entry_unaligned,
+            UnalignedMemoryMapping
+        );
+    };
+}
+bench_randomized_access_with_0001_entry!();
+
+macro_rules! bench_randomized_access_with_n_entries {
+    (do_bench, $name:ident, $mem:tt, $n:expr) => {
+        #[bench]
+        fn $name(bencher: &mut Bencher) {
+            let mut prng = new_prng!();
+            let (memory_regions, end_address) =
+                generate_memory_regions($n, MemoryState::Readable, Some(&mut prng));
+            let config = Config::default();
+            let memory_mapping = $mem::new(memory_regions, &config, &SBPFVersion::V2).unwrap();
+            bencher.iter(|| {
+                let _ = memory_mapping.map(
+                    AccessType::Load,
+                    0x100000000 + (prng.gen::<u64>() % end_address),
+                    1,
+                );
+            });
+        }
+    };
+    ($n:expr, $aligned:ident, $unaligned:ident) => {
+        bench_randomized_access_with_n_entries!(do_bench, $aligned, AlignedMemoryMapping, $n);
+        bench_randomized_access_with_n_entries!(do_bench, $unaligned, UnalignedMemoryMapping, $n);
+    };
+}
+bench_randomized_access_with_n_entries!(
+    4,
+    bench_randomized_access_with_0004_entries_aligned,
+    bench_randomized_access_with_0004_entries_unaligned
+);
+bench_randomized_access_with_n_entries!(
+    16,
+    bench_randomized_access_with_0016_entries_aligned,
+    bench_randomized_access_with_0016_entries_unaligned
+);
+bench_randomized_access_with_n_entries!(
+    64,
+    bench_randomized_access_with_0064_entries_aligned,
+    bench_randomized_access_with_0064_entries_unaligned
+);
+bench_randomized_access_with_n_entries!(
+    256,
+    bench_randomized_access_with_0256_entries_aligned,
+    bench_randomized_access_with_0256_entries_unaligned
+);
+bench_randomized_access_with_n_entries!(
+    1024,
+    bench_randomized_access_with_1024_entries_aligned,
+    bench_randomized_access_with_1024_entries_unaligned
+);
+
+macro_rules! bench_randomized_mapping_with_n_entries {
+    (do_bench, $name:ident, $mem:tt, $n:expr) => {
+        #[bench]
+        fn $name(bencher: &mut Bencher) {
+            let mut prng = new_prng!();
+            let (memory_regions, _end_address) =
+                generate_memory_regions($n, MemoryState::Readable, Some(&mut prng));
+            let config = Config::default();
+            let memory_mapping = $mem::new(memory_regions, &config, &SBPFVersion::V2).unwrap();
+            bencher.iter(|| {
+                let _ = memory_mapping.map(AccessType::Load, 0x100000000, 1);
+            });
+        }
+    };
+    ($n:expr, $aligned:ident, $unaligned:ident) => {
+        bench_randomized_mapping_with_n_entries!(do_bench, $aligned, AlignedMemoryMapping, $n);
+        bench_randomized_mapping_with_n_entries!(do_bench, $unaligned, UnalignedMemoryMapping, $n);
+    };
+}
+bench_randomized_mapping_with_n_entries!(
+    1,
+    bench_randomized_mapping_with_0001_entries_aligned,
+    bench_randomized_mapping_with_0001_entries_unaligned
+);
+bench_randomized_mapping_with_n_entries!(
+    4,
+    bench_randomized_mapping_with_0004_entries_aligned,
+    bench_randomized_mapping_with_0004_entries_unaligned
+);
+bench_randomized_mapping_with_n_entries!(
+    16,
+    bench_randomized_mapping_with_0016_entries_aligned,
+    bench_randomized_mapping_with_0016_entries_unaligned
+);
+bench_randomized_mapping_with_n_entries!(
+    64,
+    bench_randomized_mapping_with_0064_entries_aligned,
+    bench_randomized_mapping_with_0064_entries_unaligned
+);
+bench_randomized_mapping_with_n_entries!(
+    256,
+    bench_randomized_mapping_with_0256_entries_aligned,
+    bench_randomized_mapping_with_0256_entries_unaligned
+);
+bench_randomized_mapping_with_n_entries!(
+    1024,
+    bench_randomized_mapping_with_1024_entries_aligned,
+    bench_randomized_mapping_with_1024_entries_unaligned
+);
+
+macro_rules! bench_mapping_with_n_entries {
+    (do_bench, $name:ident, $mem:tt, $n:expr) => {
+        #[bench]
+        fn $name(bencher: &mut Bencher) {
+            let (memory_regions, _end_address) =
+                generate_memory_regions($n, MemoryState::Readable, None);
+            let config = Config::default();
+            let memory_mapping = $mem::new(memory_regions, &config, &SBPFVersion::V2).unwrap();
+            bencher.iter(|| {
+                let _ = memory_mapping.map(AccessType::Load, 0x100000000, 1);
+            });
+        }
+    };
+    ($n:expr, $aligned:ident, $unaligned:ident) => {
+        bench_mapping_with_n_entries!(do_bench, $aligned, AlignedMemoryMapping, $n);
+        bench_mapping_with_n_entries!(do_bench, $unaligned, UnalignedMemoryMapping, $n);
+    };
+}
+bench_mapping_with_n_entries!(
+    1,
+    bench_mapping_with_001_entries_aligned,
+    bench_mapping_with_001_entries_unaligned
+);
+bench_mapping_with_n_entries!(
+    4,
+    bench_mapping_with_004_entries_aligned,
+    bench_mapping_with_004_entries_unaligned
+);
+bench_mapping_with_n_entries!(
+    16,
+    bench_mapping_with_0016_entries_aligned,
+    bench_mapping_with_0016_entries_unaligned
+);
+bench_mapping_with_n_entries!(
+    64,
+    bench_mapping_with_0064_entries_aligned,
+    bench_mapping_with_0064_entries_unaligned
+);
+bench_mapping_with_n_entries!(
+    256,
+    bench_mapping_with_0256_entries_aligned,
+    bench_mapping_with_0256_entries_unaligned
+);
+bench_mapping_with_n_entries!(
+    1024,
+    bench_mapping_with_1024_entries_aligned,
+    bench_mapping_with_1024_entries_unaligned
+);
+
+enum MemoryOperation {
+    Map,
+    Load,
+    Store(u64),
+}
+
+fn do_bench_mapping_operation(bencher: &mut Bencher, op: MemoryOperation, vm_addr: u64) {
+    let mut mem1 = vec![0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18];
+    let mut mem2 = vec![0x22; 1];
+    let config = Config::default();
+    let memory_mapping = UnalignedMemoryMapping::new(
+        vec![
+            MemoryRegion::new_writable(&mut mem1, 0x100000000),
+            MemoryRegion::new_writable(&mut mem2, 0x100000000 + 8),
+        ],
+        &config,
+        &SBPFVersion::V2,
+    )
+    .unwrap();
+
+    match op {
+        MemoryOperation::Map => bencher.iter(|| {
+            let _ = memory_mapping.map(AccessType::Load, vm_addr, 8).unwrap();
+        }),
+        MemoryOperation::Load => bencher.iter(|| {
+            let _ = memory_mapping.load::<u64>(vm_addr).unwrap();
+        }),
+        MemoryOperation::Store(val) => bencher.iter(|| {
+            let _ = memory_mapping.store(val, vm_addr).unwrap();
+        }),
+    }
+}
+
+#[bench]
+fn bench_mapping_8_byte_map(bencher: &mut Bencher) {
+    do_bench_mapping_operation(bencher, MemoryOperation::Map, 0x100000000)
+}
+
+#[bench]
+fn bench_mapping_8_byte_load(bencher: &mut Bencher) {
+    do_bench_mapping_operation(bencher, MemoryOperation::Load, 0x100000000)
+}
+
+#[bench]
+fn bench_mapping_8_byte_load_non_contiguous(bencher: &mut Bencher) {
+    do_bench_mapping_operation(bencher, MemoryOperation::Load, 0x100000001)
+}
+
+#[bench]
+fn bench_mapping_8_byte_store(bencher: &mut Bencher) {
+    do_bench_mapping_operation(bencher, MemoryOperation::Store(42), 0x100000000)
+}
+
+#[bench]
+fn bench_mapping_8_byte_store_non_contiguous(bencher: &mut Bencher) {
+    do_bench_mapping_operation(bencher, MemoryOperation::Store(42), 0x100000001)
+}
diff --git a/rbpf/benches/vm_execution.rs b/rbpf/benches/vm_execution.rs
new file mode 100644
index 00000000000000..be651e6f625777
--- /dev/null
+++ b/rbpf/benches/vm_execution.rs
@@ -0,0 +1,270 @@
+// Copyright 2020 Solana Maintainers <maintainers@solana.foundation>
+//
+// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
+// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+#![feature(test)]
+
+extern crate solana_rbpf;
+extern crate test;
+
+use solana_rbpf::{
+    ebpf,
+    elf::Executable,
+    memory_region::MemoryRegion,
+    program::{BuiltinProgram, FunctionRegistry},
+    verifier::RequisiteVerifier,
+    vm::{Config, TestContextObject},
+};
+use std::{fs::File, io::Read, sync::Arc};
+use test::Bencher;
+use test_utils::create_vm;
+
+#[bench]
+fn bench_init_interpreter_start(bencher: &mut Bencher) {
+    let mut file = File::open("tests/elfs/rodata_section.so").unwrap();
+    let mut elf = Vec::new();
+    file.read_to_end(&mut elf).unwrap();
+    let executable =
+        Executable::<TestContextObject>::from_elf(&elf, Arc::new(BuiltinProgram::new_mock()))
+            .unwrap();
+    executable.verify::<RequisiteVerifier>().unwrap();
+    let mut context_object = TestContextObject::default();
+    create_vm!(
+        vm,
+        &executable,
+        &mut context_object,
+        stack,
+        heap,
+        Vec::new(),
+        None
+    );
+    bencher.iter(|| {
+        vm.context_object_pointer.remaining = 37;
+        vm.execute_program(&executable, true).1.unwrap()
+    });
+}
+
+#[cfg(not(windows))]
+#[bench]
+fn bench_init_jit_start(bencher: &mut Bencher) {
+    let mut file = File::open("tests/elfs/rodata_section.so").unwrap();
+    let mut elf = Vec::new();
+    file.read_to_end(&mut elf).unwrap();
+    let mut executable =
+        Executable::<TestContextObject>::from_elf(&elf, Arc::new(BuiltinProgram::new_mock()))
+            .unwrap();
+    executable.verify::<RequisiteVerifier>().unwrap();
+    executable.jit_compile().unwrap();
+    let mut context_object = TestContextObject::default();
+    create_vm!(
+        vm,
+        &executable,
+        &mut context_object,
+        stack,
+        heap,
+        Vec::new(),
+        None
+    );
+    bencher.iter(|| {
+        vm.context_object_pointer.remaining = 37;
+        vm.execute_program(&executable, false).1.unwrap()
+    });
+}
+
+#[cfg(not(windows))]
+fn bench_jit_vs_interpreter(
+    bencher: &mut Bencher,
+    assembly: &str,
+    config: Config,
+    instruction_meter: u64,
+    mem: &mut [u8],
+) {
+    let mut executable = solana_rbpf::assembler::assemble::<TestContextObject>(
+        assembly,
+        Arc::new(BuiltinProgram::new_loader(
+            config,
+            FunctionRegistry::default(),
+        )),
+    )
+    .unwrap();
+    executable.verify::<RequisiteVerifier>().unwrap();
+    executable.jit_compile().unwrap();
+    let mut context_object = TestContextObject::default();
+    let mem_region = MemoryRegion::new_writable(mem, ebpf::MM_INPUT_START);
+    create_vm!(
+        vm,
+        &executable,
+        &mut context_object,
+        stack,
+        heap,
+        vec![mem_region],
+        None
+    );
+    let interpreter_summary = bencher
+        .bench(|bencher| {
+            bencher.iter(|| {
+                vm.context_object_pointer.remaining = instruction_meter;
+                let (instruction_count_interpreter, result) = vm.execute_program(&executable, true);
+                assert!(result.is_ok(), "{:?}", result);
+                assert_eq!(instruction_count_interpreter, instruction_meter);
+            });
+            Ok(())
+        })
+        .unwrap()
+        .unwrap();
+    let jit_summary = bencher
        .bench(|bencher| {
+            bencher.iter(|| {
+                vm.context_object_pointer.remaining = instruction_meter;
+                let (instruction_count_jit, result) = vm.execute_program(&executable, false);
+                assert!(result.is_ok(), "{:?}", result);
+                assert_eq!(instruction_count_jit, instruction_meter);
+            });
+            Ok(())
+        })
+        .unwrap()
+        .unwrap();
+    println!(
+        "jit_vs_interpreter_ratio={}",
+        interpreter_summary.mean / jit_summary.mean
+    );
+}
+
+#[cfg(not(windows))]
+#[bench]
+fn bench_jit_vs_interpreter_address_translation(bencher: &mut Bencher) {
+    bench_jit_vs_interpreter(
+        bencher,
+        "
+    ldxb r0, [r1]
+    add r1, 1
+    mov r0, r1
+    and r0, 0xFFFFFF
+    jlt r0, 0x20000, -5
+    exit",
+        Config::default(),
+        655361,
+        &mut [0; 0x20000],
+    );
+}
+
+static ADDRESS_TRANSLATION_STACK_CODE: &str = "
+    mov r1, r2
+    and r1, 4095
+    mov r3, r10
+    sub r3, r1
+    add r3,
-1 + ldxb r4, [r3] + add r2, 1 + jlt r2, 0x10000, -8 + exit"; + +#[cfg(not(windows))] +#[bench] +fn bench_jit_vs_interpreter_address_translation_stack_fixed(bencher: &mut Bencher) { + bench_jit_vs_interpreter( + bencher, + ADDRESS_TRANSLATION_STACK_CODE, + Config { + enable_sbpf_v2: false, + ..Config::default() + }, + 524289, + &mut [], + ); +} + +#[cfg(not(windows))] +#[bench] +fn bench_jit_vs_interpreter_address_translation_stack_dynamic(bencher: &mut Bencher) { + bench_jit_vs_interpreter( + bencher, + ADDRESS_TRANSLATION_STACK_CODE, + Config { + enable_sbpf_v2: true, + ..Config::default() + }, + 524289, + &mut [], + ); +} + +#[cfg(not(windows))] +#[bench] +fn bench_jit_vs_interpreter_empty_for_loop(bencher: &mut Bencher) { + bench_jit_vs_interpreter( + bencher, + " + mov r1, r2 + and r1, 1023 + add r2, 1 + jlt r2, 0x10000, -4 + exit", + Config::default(), + 262145, + &mut [0; 0], + ); +} + +#[cfg(not(windows))] +#[bench] +fn bench_jit_vs_interpreter_call_depth_fixed(bencher: &mut Bencher) { + bench_jit_vs_interpreter( + bencher, + " + mov r6, 0 + add r6, 1 + mov r1, 18 + call function_foo + jlt r6, 1024, -4 + exit + function_foo: + stw [r10-4], 0x11223344 + mov r6, r1 + jgt r6, 0, +1 + exit + mov r1, r6 + add r1, -1 + call function_foo + exit", + Config { + enable_sbpf_v2: false, + ..Config::default() + }, + 137218, + &mut [], + ); +} + +#[cfg(not(windows))] +#[bench] +fn bench_jit_vs_interpreter_call_depth_dynamic(bencher: &mut Bencher) { + bench_jit_vs_interpreter( + bencher, + " + mov r6, 0 + add r6, 1 + mov r1, 18 + call function_foo + jlt r6, 1024, -4 + exit + function_foo: + add r11, -4 + stw [r10-4], 0x11223344 + mov r6, r1 + jeq r6, 0, +3 + mov r1, r6 + add r1, -1 + call function_foo + add r11, 4 + exit", + Config { + enable_sbpf_v2: true, + ..Config::default() + }, + 176130, + &mut [], + ); +} diff --git a/rbpf/cli/Cargo.lock b/rbpf/cli/Cargo.lock new file mode 100644 index 00000000000000..dfb41f674d9a23 --- /dev/null +++ b/rbpf/cli/Cargo.lock @@ -0,0 +1,516 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "ascii" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clap" +version = "3.0.0-beta.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bd1061998a501ee7d4b6d449020df3266ca3124b941ec56cf2005c3779ca142" +dependencies = [ + "atty", + "bitflags", + "clap_derive", + "indexmap", + "lazy_static", + "os_str_bytes", + "strsim", + "termcolor", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "clap_derive" +version = "3.0.0-beta.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "370f715b81112975b1b69db93e0b56ea4cd4e5002ac43b2da8474106a54096a1" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "combine" +version = "3.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da3da6baa321ec19e1cc41d31bf599f00c783d0517095cdaf0332e3fe8d20680" +dependencies = [ + "ascii", + "byteorder", + "either", + "memchr", + "unreachable", +] + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "gdbstub" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32c95766e0414f8bfc1d07055574c621b67739466d6ba516c4fef8e99d30d2e6" +dependencies = [ + "bitflags", + "cfg-if 1.0.0", + "log", + "managed", + "num-traits", + "paste", +] + +[[package]] +name = "getrandom" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d39cd93900197114fa1fcb7ae84ca742095eed9442088988ae74fa744e930e77" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi", +] + +[[package]] +name = "goblin" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c955ab4e0ad8c843ea653a3d143048b87490d9be56bd7132a435c2407846ac8f" +dependencies = [ + "log", + "plain", + "scroll", +] + +[[package]] +name = "hash32" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +dependencies = [ + "byteorder", +] + +[[package]] +name = "hashbrown" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" + +[[package]] +name = "heck" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +dependencies = [ + "libc", +] + +[[package]] +name = "indexmap" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" +dependencies = [ + "autocfg", + "hashbrown", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614" + +[[package]] +name = "log" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +dependencies = [ + "cfg-if 0.1.10", +] + +[[package]] +name = "managed" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca88d725a0a943b096803bd34e73a4437208b6077654cc4ecb2947a5f91618d" + +[[package]] +name = "memchr" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + +[[package]] +name = "os_str_bytes" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afb2e1c3ee07430c2cf76151675e583e0f19985fa6efae47d6848a3e2c824f85" + +[[package]] +name = "paste" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" + +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + +[[package]] +name = "ppv-lite86" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rbpf_cli" +version = "0.8.0" +dependencies = [ + "clap", + "solana_rbpf", + "test_utils", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" + +[[package]] +name = "scroll" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" +dependencies = [ + "scroll_derive", +] + +[[package]] +name = "scroll_derive" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "solana_rbpf" +version = "0.8.0" +dependencies = [ + "byteorder", + "combine", + "gdbstub", + "goblin", + "hash32", + "libc", + "log", + "rand", + "rustc-demangle", + "scroll", + "thiserror", + "winapi", +] + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "syn" +version = "1.0.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "termcolor" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "test_utils" +version = "0.8.0" +dependencies = [ + "libc", + "solana_rbpf", +] + +[[package]] +name = "textwrap" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "203008d98caf094106cfaba70acfed15e18ed3ddb7d94e49baec153a2b462789" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "thiserror" +version = "1.0.26" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "unicode-segmentation" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" + +[[package]] +name = "unicode-width" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "unreachable" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" +dependencies = [ + "void", +] + +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + +[[package]] +name = "version_check" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/rbpf/cli/Cargo.toml b/rbpf/cli/Cargo.toml new file mode 100644 index 00000000000000..4655a5e6a62e05 --- /dev/null +++ b/rbpf/cli/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "rbpf_cli" +version = "0.8.0" +description = "CLI to test and analyze eBPF programs" +authors = ["Solana Maintainers "] +repository = "https://github.com/solana-labs/rbpf" +homepage = "https://solana.com/" +keywords = ["BPF", "eBPF", "interpreter", "JIT"] +edition = "2018" + +[dependencies] +solana_rbpf = { path = 
"../", features = ["debugger"] } +test_utils = { path = "../test_utils/" } +clap = "3.0.0-beta.2" diff --git a/rbpf/cli/src/main.rs b/rbpf/cli/src/main.rs new file mode 100644 index 00000000000000..cded744a0c29b5 --- /dev/null +++ b/rbpf/cli/src/main.rs @@ -0,0 +1,240 @@ +use clap::{crate_version, App, Arg}; +use solana_rbpf::{ + aligned_memory::AlignedMemory, + assembler::assemble, + ebpf, + elf::Executable, + memory_region::{MemoryMapping, MemoryRegion}, + program::{BuiltinProgram, FunctionRegistry}, + static_analysis::Analysis, + verifier::RequisiteVerifier, + vm::{Config, DynamicAnalysis, EbpfVm, TestContextObject}, +}; +use std::{fs::File, io::Read, path::Path, sync::Arc}; + +fn main() { + let matches = App::new("Solana BPF CLI") + .version(crate_version!()) + .author("Solana Maintainers ") + .about("CLI to test and analyze Solana BPF programs") + .arg( + Arg::new("assembler") + .about("Assemble and load Solana BPF executable") + .short('a') + .long("asm") + .value_name("FILE") + .takes_value(true) + .required_unless_present("elf"), + ) + .arg( + Arg::new("elf") + .about("Load ELF as Solana BPF executable") + .short('e') + .long("elf") + .value_name("FILE") + .takes_value(true) + .required_unless_present("assembler"), + ) + .arg( + Arg::new("input") + .about("Input for the program to run on") + .short('i') + .long("input") + .value_name("FILE / BYTES") + .takes_value(true) + .default_value("0"), + ) + .arg( + Arg::new("memory") + .about("Heap memory for the program to run on") + .short('m') + .long("mem") + .value_name("BYTES") + .takes_value(true) + .default_value("0"), + ) + .arg( + Arg::new("use") + .about("Method of execution to use") + .short('u') + .long("use") + .takes_value(true) + .possible_values(&["cfg", "debugger", "disassembler", "interpreter", "jit"]) + .required(true), + ) + .arg( + Arg::new("instruction limit") + .about("Limit the number of instructions to execute") + .short('l') + .long("lim") + .takes_value(true) + .value_name("COUNT") + .default_value(&i64::MAX.to_string()), + ) + .arg( + Arg::new("trace") + .about("Display trace using tracing instrumentation") + .short('t') + .long("trace"), + ) + .arg( + Arg::new("port") + .about("Port to use for the connection with a remote debugger") + .long("port") + .takes_value(true) + .value_name("PORT") + .default_value("9001"), + ) + .arg( + Arg::new("profile") + .about("Display profile using tracing instrumentation") + .short('p') + .long("prof"), + ) + .get_matches(); + + let loader = Arc::new(BuiltinProgram::new_loader( + Config { + enable_instruction_tracing: matches.is_present("trace") + || matches.is_present("profile"), + enable_symbol_and_section_labels: true, + ..Config::default() + }, + FunctionRegistry::default(), + )); + #[allow(unused_mut)] + let mut executable = match matches.value_of("assembler") { + Some(asm_file_name) => { + let mut file = File::open(Path::new(asm_file_name)).unwrap(); + let mut source = Vec::new(); + file.read_to_end(&mut source).unwrap(); + assemble::(std::str::from_utf8(source.as_slice()).unwrap(), loader) + } + None => { + let mut file = File::open(Path::new(matches.value_of("elf").unwrap())).unwrap(); + let mut elf = Vec::new(); + file.read_to_end(&mut elf).unwrap(); + Executable::::from_elf(&elf, loader) + .map_err(|err| format!("Executable constructor failed: {err:?}")) + } + } + .unwrap(); + + executable.verify::().unwrap(); + + let mut mem = match matches.value_of("input").unwrap().parse::() { + Ok(allocate) => vec![0u8; allocate], + Err(_) => { + let mut file = 
File::open(Path::new(matches.value_of("input").unwrap())).unwrap(); + let mut memory = Vec::new(); + file.read_to_end(&mut memory).unwrap(); + memory + } + }; + #[cfg(all(not(target_os = "windows"), target_arch = "x86_64"))] + if matches.value_of("use") == Some("jit") { + executable.jit_compile().unwrap(); + } + let mut context_object = TestContextObject::new( + matches + .value_of("instruction limit") + .unwrap() + .parse::<u64>() + .unwrap(), + ); + let config = executable.get_config(); + let sbpf_version = executable.get_sbpf_version(); + let mut stack = AlignedMemory::<{ ebpf::HOST_ALIGN }>::zero_filled(config.stack_size()); + let stack_len = stack.len(); + let mut heap = AlignedMemory::<{ ebpf::HOST_ALIGN }>::zero_filled( + matches + .value_of("memory") + .unwrap() + .parse::<usize>() + .unwrap(), + ); + let regions: Vec<MemoryRegion> = vec![ + executable.get_ro_region(), + MemoryRegion::new_writable_gapped( + stack.as_slice_mut(), + ebpf::MM_STACK_START, + if !sbpf_version.dynamic_stack_frames() && config.enable_stack_frame_gaps { + config.stack_frame_size as u64 + } else { + 0 + }, + ), + MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START), + MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START), + ]; + + let memory_mapping = MemoryMapping::new(regions, config, sbpf_version).unwrap(); + + let mut vm = EbpfVm::new( + executable.get_loader().clone(), + executable.get_sbpf_version(), + &mut context_object, + memory_mapping, + stack_len, + ); + + let analysis = if matches.value_of("use") == Some("cfg") + || matches.value_of("use") == Some("disassembler") + || matches.is_present("trace") + || matches.is_present("profile") + { + Some(Analysis::from_executable(&executable).unwrap()) + } else { + None + }; + match matches.value_of("use") { + Some("cfg") => { + let mut file = File::create("cfg.dot").unwrap(); + analysis + .as_ref() + .unwrap() + .visualize_graphically(&mut file, None) + .unwrap(); + return; + } + Some("disassembler") => { + let stdout = std::io::stdout(); + analysis + .as_ref() + .unwrap() + .disassemble(&mut stdout.lock()) + .unwrap(); + return; + } + _ => {} + } + + if matches.value_of("use").unwrap() == "debugger" { + vm.debug_port = Some(matches.value_of("port").unwrap().parse::<u16>().unwrap()); + } + let (instruction_count, result) = + vm.execute_program(&executable, matches.value_of("use").unwrap() != "jit"); + println!("Result: {result:?}"); + println!("Instruction Count: {instruction_count}"); + if matches.is_present("trace") { + println!("Trace:\n"); + let stdout = std::io::stdout(); + analysis + .as_ref() + .unwrap() + .disassemble_trace_log(&mut stdout.lock(), &vm.context_object_pointer.trace_log) + .unwrap(); + } + if matches.is_present("profile") { + let dynamic_analysis = DynamicAnalysis::new( + &vm.context_object_pointer.trace_log, + analysis.as_ref().unwrap(), + ); + let mut file = File::create("profile.dot").unwrap(); + analysis + .as_ref() + .unwrap() + .visualize_graphically(&mut file, Some(&dynamic_analysis)) + .unwrap(); + } +} diff --git a/rbpf/clippy.toml b/rbpf/clippy.toml new file mode 100644 index 00000000000000..1d4a2968aba266 --- /dev/null +++ b/rbpf/clippy.toml @@ -0,0 +1 @@ +doc-valid-idents = ["eBPF", "uBPF"] diff --git a/rbpf/examples/disassemble.rs b/rbpf/examples/disassemble.rs new file mode 100644 index 00000000000000..454fec2e5f5cc6 --- /dev/null +++ b/rbpf/examples/disassemble.rs @@ -0,0 +1,45 @@ +// Copyright 2017 6WIND S.A. +// +// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or +// the MIT license <http://opensource.org/licenses/MIT>, at your option. 
This file may not be +// copied, modified, or distributed except according to those terms. + +extern crate solana_rbpf; +use solana_rbpf::{ + elf::Executable, + program::{BuiltinProgram, FunctionRegistry, SBPFVersion}, + static_analysis::Analysis, + vm::TestContextObject, +}; +use std::sync::Arc; + +// Simply disassemble a program into human-readable instructions. +fn main() { + let program: &'static [u8] = &[ + 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x12, 0x50, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x79, 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbf, 0x13, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x07, 0x03, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x2d, 0x23, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x69, 0x12, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x02, 0x10, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x71, 0x12, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x02, 0x0e, + 0x00, 0x06, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x11, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbf, + 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x57, 0x02, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, + 0x15, 0x02, 0x08, 0x00, 0x99, 0x99, 0x00, 0x00, 0x18, 0x02, 0x00, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x21, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xb7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x18, 0x02, 0x00, 0x00, 0x00, + 0x00, 0x99, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d, 0x21, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + let loader = Arc::new(BuiltinProgram::new_mock()); + let executable = Executable::<TestContextObject>::from_text_bytes( + program, + loader, + SBPFVersion::V2, + FunctionRegistry::default(), + ) + .unwrap(); + let analysis = Analysis::from_executable(&executable).unwrap(); + let stdout = std::io::stdout(); + analysis.disassemble(&mut stdout.lock()).unwrap(); +} diff --git a/rbpf/examples/to_json.rs b/rbpf/examples/to_json.rs new file mode 100644 index 00000000000000..0e2950aa373f7f --- /dev/null +++ b/rbpf/examples/to_json.rs @@ -0,0 +1,88 @@ +// Copyright 2017 6WIND S.A. +// +// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or +// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +#[macro_use] +extern crate json; + +extern crate elf; +use std::path::PathBuf; + +extern crate solana_rbpf; +use solana_rbpf::{ + elf::Executable, + program::{BuiltinProgram, FunctionRegistry, SBPFVersion}, + static_analysis::Analysis, + vm::TestContextObject, +}; +use std::sync::Arc; +// Turn a program into a JSON string. +// +// Relies on the `json` crate. +// +// You may copy this function and adapt it according to your needs. For instance, you may want to: +// +// * Remove the "desc" (description) attributes from the output. +// * Print integers as integers, and not as strings containing their hexadecimal representation +// (just replace the relevant `format!()` calls by the commented values). 
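+// +// A quick aside, not part of the original example: Rust formats signed integers +// in hex from their two's-complement bit pattern, so the type's width decides how +// many leading f digits a negative value prints with, e.g.: +// +// assert_eq!(format!("{:#x}", -1i64), "0xffffffffffffffff"); +// assert_eq!(format!("{:#x}", -1i64 as i32), "0xffffffff"); +// +// This is why to_json below casts imm down to i32 before formatting it. 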
+fn to_json(program: &[u8]) -> String { + let executable = Executable::<TestContextObject>::from_text_bytes( + program, + Arc::new(BuiltinProgram::new_mock()), + SBPFVersion::V2, + FunctionRegistry::default(), + ) + .unwrap(); + let analysis = Analysis::from_executable(&executable).unwrap(); + + let mut json_insns = vec![]; + for insn in analysis.instructions.iter() { + json_insns.push(object!( + "opc" => format!("{:#x}", insn.opc), // => insn.opc, + "dst" => format!("{:#x}", insn.dst), // => insn.dst, + "src" => format!("{:#x}", insn.src), // => insn.src, + "off" => format!("{:#x}", insn.off), // => insn.off, + // Warning: for imm we use an i64 instead of an i32 (to have correct values for + // the `lddw` operation). If we print a number in the JSON this is not a problem: the + // internal i64 has the same value, with the sign extended into the 32 most significant bits. + // If we print the hexadecimal value as a string, however, we want to cast it to an i32 + // to prevent all other instructions from printing a spurious `ffffffff` prefix when the + // number is negative. When a value takes more than 32 bits with `lddw`, the cast + // has no effect and the complete value is printed anyway. + "imm" => format!("{:#x}", insn.imm as i32), // => insn.imm, + "desc" => analysis.disassemble_instruction( + insn + ), + )); + } + json::stringify_pretty( + object!( + "size" => json_insns.len(), + "insns" => json_insns + ), + 4, + ) +} + +// Load a program from an object file, and print it to standard output as a JSON string. +fn main() { + // Let's reuse this file from the `load_elf` example. + let filename = "examples/load_elf__block_a_port.o"; + + let path = PathBuf::from(filename); + let file = match elf::File::open_path(path) { + Ok(f) => f, + Err(e) => panic!("Error: {:?}", e), + }; + + let text_scn = match file.get_section(".classifier") { + Some(s) => s, + None => panic!("Failed to look up .classifier section"), + }; + + let prog = &text_scn.data; + + println!("{}", to_json(prog)); +} diff --git a/rbpf/fuzz/.gitignore b/rbpf/fuzz/.gitignore new file mode 100644 index 00000000000000..a0925114d619b8 --- /dev/null +++ b/rbpf/fuzz/.gitignore @@ -0,0 +1,3 @@ +target +corpus +artifacts diff --git a/rbpf/fuzz/Cargo.lock b/rbpf/fuzz/Cargo.lock new file mode 100644 index 00000000000000..c4d0d1961b3d8a --- /dev/null +++ b/rbpf/fuzz/Cargo.lock @@ -0,0 +1,441 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "arbitrary" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c38b6b6b79f671c25e1a3e785b7b82d7562ffc9cd3efdc98627e5668a2472490" +dependencies = [ + "derive_arbitrary", +] + +[[package]] +name = "ascii" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e" + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "cc" +version = "1.0.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "combine" +version = "3.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da3da6baa321ec19e1cc41d31bf599f00c783d0517095cdaf0332e3fe8d20680" +dependencies = [ + "ascii", + "byteorder", + "either", + "memchr", + "unreachable", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "lazy_static", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" +dependencies = [ + "cfg-if", + "lazy_static", +] + +[[package]] +name = "derive_arbitrary" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98e23c06c035dac87bd802d98f368df73a7f2cb05a66ffbd1f377e821fac4af9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "getrandom" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "goblin" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c955ab4e0ad8c843ea653a3d143048b87490d9be56bd7132a435c2407846ac8f" 
+dependencies = [ + "log", + "plain", + "scroll", +] + +[[package]] +name = "hash32" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +dependencies = [ + "byteorder", +] + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.122" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec647867e2bf0772e28c8bcde4f0d19a9216916e890543b5a03ed8ef27b8f259" + +[[package]] +name = "libfuzzer-sys" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "336244aaeab6a12df46480dc585802aa743a72d66b11937844c61bbca84c991d" +dependencies = [ + "arbitrary", + "cc", + "once_cell", +] + +[[package]] +name = "log" +version = "0.4.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "memchr" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" + +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num-traits" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" + +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + +[[package]] +name = "ppv-lite86" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" + +[[package]] +name = "proc-macro2" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "632d02bff7f874a36f33ea8bb416cd484b90cc66c1194b1a1110d067a7013f58" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rayon" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +dependencies = [ + "autocfg", + "crossbeam-deque", + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-utils", + "lazy_static", + "num_cpus", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "scroll" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" +dependencies = [ + "scroll_derive", +] + +[[package]] +name = "scroll_derive" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "solana_rbpf" +version = "0.4.0" +dependencies = [ + "arbitrary", + "byteorder", + "combine", + "goblin", + "hash32", + "libc", + "log", + "rand", + "rustc-demangle", + "scroll", + "thiserror", +] + +[[package]] +name = "solana_rbpf-fuzz" +version = "0.4.0" +dependencies = [ + "arbitrary", + "libfuzzer-sys", + "num-traits", + "rayon", + "solana_rbpf", + "test_utils", +] + +[[package]] +name = "syn" +version = "1.0.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "test_utils" +version = "0.4.0" +dependencies = [ + "libc", + "solana_rbpf", +] + +[[package]] +name = "thiserror" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" + +[[package]] +name = "unreachable" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" +dependencies = [ + "void", +] + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + +[[package]] +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" diff --git a/rbpf/fuzz/Cargo.toml b/rbpf/fuzz/Cargo.toml new file mode 100644 index 00000000000000..957e67ceb350a5 --- /dev/null +++ b/rbpf/fuzz/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "solana_rbpf-fuzz" +version = "0.8.0" +authors = ["Automatically generated"] +publish = false +edition = "2018" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +arbitrary = { version = "1.0", features = ["derive"] } +libfuzzer-sys = "0.4" +num-traits = "0.2" +rayon = "1.5" +test_utils = { path = "../test_utils/" } + +[dependencies.solana_rbpf] +path = ".." +features = ["fuzzer-not-safe-for-production"] + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[features] +only-verified = [] + +[[bin]] +name = "dumb" +path = "fuzz_targets/dumb.rs" +test = false +doc = false + +[[bin]] +name = "smart" +path = "fuzz_targets/smart.rs" +test = false +doc = false + +[[bin]] +name = "smart-jit-diff" +path = "fuzz_targets/smart_jit_diff.rs" +test = false +doc = false + +[[bin]] +name = "smarter-jit-diff" +path = "fuzz_targets/smarter_jit_diff.rs" +test = false +doc = false + +[[bin]] +name = "verify-semantic-aware" +path = "fuzz_targets/verify_semantic_aware.rs" +test = false +doc = false diff --git a/rbpf/fuzz/fuzz_targets/common.rs b/rbpf/fuzz/fuzz_targets/common.rs new file mode 100644 index 00000000000000..f2f3e7e91344ec --- /dev/null +++ b/rbpf/fuzz/fuzz_targets/common.rs @@ -0,0 +1,67 @@ +use std::mem::size_of; + +use arbitrary::{Arbitrary, Unstructured}; + +use solana_rbpf::vm::Config; + +#[derive(Debug)] +pub struct ConfigTemplate { + max_call_depth: usize, + instruction_meter_checkpoint_distance: usize, + noop_instruction_rate: u32, + enable_stack_frame_gaps: bool, + enable_symbol_and_section_labels: bool, + sanitize_user_provided_values: bool, + reject_callx_r10: bool, + optimize_rodata: bool, +} + +impl<'a> Arbitrary<'a> for ConfigTemplate { + fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { + let bools = u16::arbitrary(u)?; + Ok(ConfigTemplate { + max_call_depth: usize::from(u8::arbitrary(u)?) 
+ 1, // larger is unreasonable + must be non-zero + instruction_meter_checkpoint_distance: usize::from(u16::arbitrary(u)?), // larger is unreasonable + noop_instruction_rate: u32::from(u16::arbitrary(u)?), + enable_stack_frame_gaps: bools & (1 << 0) != 0, + enable_symbol_and_section_labels: bools & (1 << 1) != 0, + sanitize_user_provided_values: bools & (1 << 3) != 0, + reject_callx_r10: bools & (1 << 6) != 0, + optimize_rodata: bools & (1 << 9) != 0, + }) + } + + fn size_hint(_: usize) -> (usize, Option<usize>) { + ( + size_of::<u16>() + size_of::<u8>() + size_of::<u16>() + size_of::<u16>(), + None, + ) + } +} + +impl From<ConfigTemplate> for Config { + fn from(template: ConfigTemplate) -> Self { + match template { + ConfigTemplate { + max_call_depth, + instruction_meter_checkpoint_distance, + noop_instruction_rate, + enable_stack_frame_gaps, + enable_symbol_and_section_labels, + sanitize_user_provided_values, + reject_callx_r10, + optimize_rodata, + } => Config { + max_call_depth, + enable_stack_frame_gaps, + instruction_meter_checkpoint_distance, + enable_symbol_and_section_labels, + noop_instruction_rate, + sanitize_user_provided_values, + reject_callx_r10, + optimize_rodata, + ..Default::default() + }, + } + } +} diff --git a/rbpf/fuzz/fuzz_targets/dumb.rs b/rbpf/fuzz/fuzz_targets/dumb.rs new file mode 100644 index 00000000000000..e8b17c2be92597 --- /dev/null +++ b/rbpf/fuzz/fuzz_targets/dumb.rs @@ -0,0 +1,61 @@ +#![no_main] + +use std::hint::black_box; + +use libfuzzer_sys::fuzz_target; + +use solana_rbpf::{ + ebpf, + elf::Executable, + memory_region::MemoryRegion, + program::{BuiltinProgram, FunctionRegistry, SBPFVersion}, + verifier::{RequisiteVerifier, Verifier}, + vm::TestContextObject, +}; +use test_utils::create_vm; + +use crate::common::ConfigTemplate; + +mod common; + +#[derive(arbitrary::Arbitrary, Debug)] +struct DumbFuzzData { + template: ConfigTemplate, + prog: Vec<u8>, + mem: Vec<u8>, +} + +fuzz_target!(|data: DumbFuzzData| { + let prog = data.prog; + let config = data.template.into(); + let function_registry = FunctionRegistry::default(); + if RequisiteVerifier::verify(&prog, &config, &SBPFVersion::V2, &function_registry).is_err() { + // verify please + return; + } + let mut mem = data.mem; + let executable = Executable::<TestContextObject>::from_text_bytes( + &prog, + std::sync::Arc::new(BuiltinProgram::new_loader( + config, + FunctionRegistry::default(), + )), + SBPFVersion::V2, + function_registry, + ) + .unwrap(); + let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START); + let mut context_object = TestContextObject::new(29); + create_vm!( + interp_vm, + &executable, + &mut context_object, + stack, + heap, + vec![mem_region], + None + ); + + let (_interp_ins_count, interp_res) = interp_vm.execute_program(&executable, true); + drop(black_box(interp_res)); +}); diff --git a/rbpf/fuzz/fuzz_targets/grammar_aware.rs b/rbpf/fuzz/fuzz_targets/grammar_aware.rs new file mode 100644 index 00000000000000..b5b19f2ec85fc1 --- /dev/null +++ b/rbpf/fuzz/fuzz_targets/grammar_aware.rs @@ -0,0 +1,244 @@ +#![allow(dead_code)] + +use solana_rbpf::insn_builder::{Arch, BpfCode, Cond, Endian, Instruction, MemSize, Source}; + +#[derive(arbitrary::Arbitrary, Debug, Eq, PartialEq)] +pub enum FuzzedOp { + Add(Source), + Sub(Source), + Mul(Source), + Div(Source), + BitOr(Source), + BitAnd(Source), + LeftShift(Source), + RightShift(Source), + Negate, + Modulo(Source), + BitXor(Source), + Mov(Source), + SRS(Source), + SwapBytes(Endian), + Load(MemSize), + LoadAbs(MemSize), + LoadInd(MemSize), + LoadX(MemSize), + Store(MemSize), 
StoreX(MemSize), + Jump, + JumpC(Cond, Source), + Call, + Exit, +} + +impl FuzzedOp { + fn similarity(&self, other: &FuzzedOp) -> Option<u8> { + if std::mem::discriminant(self) == std::mem::discriminant(other) { + if self == other { + Some(0) + } else { + Some(8) + } + } else { + None + } + } +} + +#[derive(arbitrary::Arbitrary, Debug)] +pub struct FuzzedInstruction { + pub op: FuzzedOp, + pub dst: u8, + pub src: u8, + pub off: i16, + pub imm: i64, +} + +impl FuzzedInstruction { + pub fn similarity(&self, other: &FuzzedInstruction) -> Option<u8> { + self.op.similarity(&other.op).map(|s| { + s + (self.dst == other.dst) as u8 + + (self.src == other.src) as u8 + + (self.off == other.off) as u8 + + (self.imm == other.imm) as u8 + }) + } +} + +pub type FuzzProgram = Vec<FuzzedInstruction>; + +pub fn make_program(prog: &FuzzProgram, arch: Arch) -> BpfCode { + let mut code = BpfCode::default(); + for inst in prog { + match inst.op { + FuzzedOp::Add(src) => code + .add(src, arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::Sub(src) => code + .sub(src, arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::Mul(src) => code + .mul(src, arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::Div(src) => code + .div(src, arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::BitOr(src) => code + .bit_or(src, arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::BitAnd(src) => code + .bit_and(src, arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::LeftShift(src) => code + .left_shift(src, arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::RightShift(src) => code + .right_shift(src, arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::Negate => code + .negate(arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::Modulo(src) => code + .modulo(src, arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::BitXor(src) => code + .bit_xor(src, arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::Mov(src) => code + .mov(src, arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::SRS(src) => code + .signed_right_shift(src, arch) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::SwapBytes(endian) => code + .swap_bytes(endian) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::Load(mem) => code + .load(mem) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::LoadAbs(mem) => code + .load_abs(mem) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::LoadInd(mem) => code + .load_ind(mem) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::LoadX(mem) => code + .load_x(mem) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + 
FuzzedOp::Store(mem) => code + .store(mem) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::StoreX(mem) => code + .store_x(mem) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::Jump => code + .jump_unconditional() + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::JumpC(cond, src) => code + .jump_conditional(cond, src) + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::Call => code + .call() + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + FuzzedOp::Exit => code + .exit() + .set_dst(inst.dst) + .set_src(inst.src) + .set_off(inst.off) + .set_imm(inst.imm) + .push(), + }; + } + code +} diff --git a/rbpf/fuzz/fuzz_targets/semantic_aware.rs b/rbpf/fuzz/fuzz_targets/semantic_aware.rs new file mode 100644 index 00000000000000..6e1a066f1f7729 --- /dev/null +++ b/rbpf/fuzz/fuzz_targets/semantic_aware.rs @@ -0,0 +1,297 @@ +#![allow(dead_code)] +// based on: https://sourceware.org/binutils/docs/as/BPF-Opcodes.html + +use std::num::NonZeroI32; + +use solana_rbpf::insn_builder::{Arch, BpfCode, Cond, Endian, Instruction, MemSize, Move, Source}; + +#[derive(arbitrary::Arbitrary, Debug, Eq, PartialEq, Copy, Clone)] +pub struct Register(u8); + +impl Register { + #[cfg(feature = "only-verified")] + fn to_dst(&self) -> u8 { + self.0 % 10 // cannot write to r10 + } + + #[cfg(not(feature = "only-verified"))] + fn to_dst(&self) -> u8 { + self.0 % 11 // cannot write to r10, but we'll try anyways + } + + fn to_src(&self) -> u8 { + self.0 % 11 + } +} + +#[derive(arbitrary::Arbitrary, Debug, Eq, PartialEq, Copy, Clone)] +pub enum FuzzedSource { + Reg(Register), + Imm(i32), +} + +#[derive(arbitrary::Arbitrary, Debug, Eq, PartialEq, Copy, Clone)] +pub enum FuzzedNonZeroSource { + Reg(Register), + Imm(NonZeroI32), +} + +impl From<&FuzzedSource> for Source { + fn from(src: &FuzzedSource) -> Self { + match src { + FuzzedSource::Reg(_) => Source::Reg, + FuzzedSource::Imm(_) => Source::Imm, + } + } +} + +impl From<&FuzzedNonZeroSource> for Source { + fn from(src: &FuzzedNonZeroSource) -> Self { + match src { + FuzzedNonZeroSource::Reg(_) => Source::Reg, + FuzzedNonZeroSource::Imm(_) => Source::Imm, + } + } +} + +#[derive(arbitrary::Arbitrary, Debug, Eq, PartialEq, Copy, Clone)] +pub enum SwapSize { + S16 = 16, + S32 = 32, + S64 = 64, +} + +#[derive(arbitrary::Arbitrary, Debug, Eq, PartialEq, Copy, Clone)] +pub enum FuzzedInstruction { + Add(Arch, Register, FuzzedSource), + Sub(Arch, Register, FuzzedSource), + Mul(Arch, Register, FuzzedSource), + Div(Arch, Register, FuzzedNonZeroSource), + BitOr(Arch, Register, FuzzedSource), + BitAnd(Arch, Register, FuzzedSource), + LeftShift(Arch, Register, FuzzedSource), + RightShift(Arch, Register, FuzzedSource), + Negate(Arch, Register), + Modulo(Arch, Register, FuzzedNonZeroSource), + BitXor(Arch, Register, FuzzedSource), + Mov(Arch, Register, FuzzedSource), + SRS(Arch, Register, FuzzedSource), + SwapBytes(Register, Endian, SwapSize), + #[cfg(feature = "only-verified")] + // load only has lddw; there are no other variants, and it needs to be split + Load(Register, i32, i32), + #[cfg(not(feature = "only-verified"))] + // illegal load variants + Load(Register, MemSize, i64), + LoadAbs(MemSize, i32), + LoadInd(MemSize, Register, i32), + LoadX(Register, MemSize, Register, i16), + Store(Register, MemSize, i16, i32), 
+ StoreX(Register, MemSize, i16, Register), + Jump(i16), + JumpC(Register, Cond, FuzzedSource, i16), + Call(i32), + Exit, +} + +pub type FuzzProgram = Vec<FuzzedInstruction>; + +fn complete_alu_insn<'i>(insn: Move<'i>, dst: &Register, src: &FuzzedSource) { + match src { + FuzzedSource::Reg(r) => insn.set_dst(dst.to_dst()).set_src(r.to_src()).push(), + FuzzedSource::Imm(imm) => insn.set_dst(dst.to_dst()).set_imm(*imm as i64).push(), + }; +} + +fn complete_alu_insn_shift<'i>(insn: Move<'i>, dst: &Register, src: &FuzzedSource, max: i64) { + match src { + FuzzedSource::Reg(r) => insn.set_dst(dst.to_dst()).set_src(r.to_src()).push(), + FuzzedSource::Imm(imm) => insn + .set_dst(dst.to_dst()) + .set_imm((*imm as i64).rem_euclid(max)) + .push(), + }; +} + +fn complete_alu_insn_nonzero<'i>(insn: Move<'i>, dst: &Register, src: &FuzzedNonZeroSource) { + match src { + FuzzedNonZeroSource::Reg(r) => insn.set_dst(dst.to_dst()).set_src(r.to_src()).push(), + FuzzedNonZeroSource::Imm(imm) => insn + .set_dst(dst.to_dst()) + .set_imm(i32::from(*imm) as i64) + .push(), + }; +} + +#[cfg(feature = "only-verified")] +fn fix_jump(prog: &FuzzProgram, off: i16, pos: usize, len: usize) -> i16 { + let target = (off as usize).rem_euclid(len); + if target == 0 { + return target as i16 - pos as i16 - 1; + } + let mut remaining = target; + for insn in prog.iter() { + let next = match insn { + FuzzedInstruction::Load(_, _, _) => remaining.checked_sub(2), + _ => remaining.checked_sub(1), + }; + match next { + None => { + return target as i16 - pos as i16 - 2; + } + Some(0) => { + return target as i16 - pos as i16 - 1; + } + Some(next) => remaining = next, + } + } + unreachable!("Incorrectly computed length.") +} + +#[cfg(not(feature = "only-verified"))] +fn fix_jump(_: &FuzzProgram, off: i16, _: usize, _: usize) -> i16 { + off +} + +// lddw occupies two instruction slots, so it counts twice towards the length +fn calculate_length(prog: &FuzzProgram) -> usize { + prog.len() + + prog + .iter() + .filter(|&&insn| matches!(insn, FuzzedInstruction::Load(_, _, _))) + .count() +} + +pub fn make_program(prog: &FuzzProgram) -> BpfCode { + let mut code = BpfCode::default(); + let len = calculate_length(prog); + let mut pos = 0; + for inst in prog.iter() { + let op = if let FuzzedInstruction::JumpC(_, Cond::Abs, FuzzedSource::Reg(_), off) = inst { + FuzzedInstruction::Jump(*off) + } else { + *inst + }; + match &op { + FuzzedInstruction::Add(a, d, s) => complete_alu_insn(code.add(s.into(), *a), d, s), + FuzzedInstruction::Sub(a, d, s) => complete_alu_insn(code.sub(s.into(), *a), d, s), + FuzzedInstruction::Mul(a, d, s) => complete_alu_insn(code.mul(s.into(), *a), d, s), + FuzzedInstruction::Div(a, d, s) => { + complete_alu_insn_nonzero(code.div(s.into(), *a), d, s) + } + FuzzedInstruction::BitOr(a, d, s) => complete_alu_insn(code.bit_or(s.into(), *a), d, s), + FuzzedInstruction::BitAnd(a, d, s) => { + complete_alu_insn(code.bit_and(s.into(), *a), d, s) + } + FuzzedInstruction::LeftShift(a, d, s) => match a { + Arch::X64 => complete_alu_insn_shift(code.left_shift(s.into(), *a), d, s, 64), + Arch::X32 => complete_alu_insn_shift(code.left_shift(s.into(), *a), d, s, 32), + }, + FuzzedInstruction::RightShift(a, d, s) => match a { + Arch::X64 => complete_alu_insn_shift(code.right_shift(s.into(), *a), d, s, 64), + Arch::X32 => complete_alu_insn_shift(code.right_shift(s.into(), *a), d, s, 32), + }, + FuzzedInstruction::Negate(a, d) => { + code.negate(*a).set_dst(d.to_dst()).push(); + } + FuzzedInstruction::Modulo(a, d, s) => { + complete_alu_insn_nonzero(code.modulo(s.into(), *a), d, s) + } + 
FuzzedInstruction::BitXor(a, d, s) => { + complete_alu_insn(code.bit_xor(s.into(), *a), d, s) + } + FuzzedInstruction::Mov(a, d, s) => complete_alu_insn(code.mov(s.into(), *a), d, s), + FuzzedInstruction::SRS(a, d, s) => match a { + Arch::X64 => { + complete_alu_insn_shift(code.signed_right_shift(s.into(), *a), d, s, 64) + } + Arch::X32 => { + complete_alu_insn_shift(code.signed_right_shift(s.into(), *a), d, s, 32) + } + }, + FuzzedInstruction::SwapBytes(d, e, s) => { + code.swap_bytes(*e) + .set_dst(d.to_dst()) + .set_imm(*s as i64) + .push(); + } + #[cfg(feature = "only-verified")] + FuzzedInstruction::Load(d, imm1, imm2) => { + // lddw is split in two + code.load(MemSize::DoubleWord) + .set_dst(d.to_dst()) + .set_imm(*imm1 as i64) + .push() + .load(MemSize::Word) + .set_imm(*imm2 as i64) + .push(); + pos += 1; + } + #[cfg(not(feature = "only-verified"))] + FuzzedInstruction::Load(d, m, imm) => { + // lddw should be split in two + code.load(*m).set_dst(d.to_dst()).set_imm(*imm).push(); + } + FuzzedInstruction::LoadAbs(m, imm) => { + code.load_abs(*m).set_imm(*imm as i64).push(); + } + FuzzedInstruction::LoadInd(m, s, imm) => { + code.load_ind(*m) + .set_src(s.to_src()) + .set_imm(*imm as i64) + .push(); + } + FuzzedInstruction::LoadX(d, m, s, off) => { + code.load_x(*m) + .set_dst(d.to_dst()) + .set_src(s.to_src()) + .set_off(*off) + .push(); + } + FuzzedInstruction::Store(d, m, off, imm) => { + code.store(*m) + .set_dst(d.to_src()) + .set_off(*off) + .set_imm(*imm as i64) + .push(); + } + FuzzedInstruction::StoreX(d, m, off, s) => { + code.store_x(*m) + .set_dst(d.to_src()) + .set_off(*off) + .set_src(s.to_src()) + .push(); + } + FuzzedInstruction::Jump(off) => { + code.jump_unconditional() + .set_off(fix_jump(&prog, *off, pos, len)) + .push(); + } + FuzzedInstruction::JumpC(d, c, s, off) => { + match s { + FuzzedSource::Reg(r) => code + .jump_conditional(*c, s.into()) + .set_dst(d.to_dst()) + .set_src(r.to_src()) + .set_off(fix_jump(&prog, *off, pos, len)) + .push(), + FuzzedSource::Imm(imm) => code + .jump_conditional(*c, s.into()) + .set_dst(d.to_dst()) + .set_imm(*imm as i64) + .set_off(fix_jump(&prog, *off, pos, len)) + .push(), + }; + } + FuzzedInstruction::Call(imm) => { + code.call().set_imm(*imm as i64).push(); + } + FuzzedInstruction::Exit => { + code.exit().push(); + } + }; + pos += 1; + } + code.exit().push(); + code +} diff --git a/rbpf/fuzz/fuzz_targets/smart.rs b/rbpf/fuzz/fuzz_targets/smart.rs new file mode 100644 index 00000000000000..8ccb873ef0abdc --- /dev/null +++ b/rbpf/fuzz/fuzz_targets/smart.rs @@ -0,0 +1,71 @@ +#![no_main] + +use std::hint::black_box; + +use libfuzzer_sys::fuzz_target; + +use grammar_aware::*; +use solana_rbpf::{ + ebpf, + elf::Executable, + insn_builder::{Arch, IntoBytes}, + memory_region::MemoryRegion, + program::{BuiltinProgram, FunctionRegistry, SBPFVersion}, + verifier::{RequisiteVerifier, Verifier}, + vm::TestContextObject, +}; +use test_utils::create_vm; + +use crate::common::ConfigTemplate; + +mod common; +mod grammar_aware; + +#[derive(arbitrary::Arbitrary, Debug)] +struct FuzzData { + template: ConfigTemplate, + prog: FuzzProgram, + mem: Vec<u8>, + arch: Arch, +} + +fuzz_target!(|data: FuzzData| { + let prog = make_program(&data.prog, data.arch); + let config = data.template.into(); + let function_registry = FunctionRegistry::default(); + if RequisiteVerifier::verify( + prog.into_bytes(), + &config, + &SBPFVersion::V2, + &function_registry, + ) + .is_err() + { + // verify please + return; + } + let mut mem = data.mem; + let executable = 
diff --git a/rbpf/fuzz/fuzz_targets/smart_jit_diff.rs b/rbpf/fuzz/fuzz_targets/smart_jit_diff.rs
new file mode 100644
index 00000000000000..8e282065464c7f
--- /dev/null
+++ b/rbpf/fuzz/fuzz_targets/smart_jit_diff.rs
@@ -0,0 +1,112 @@
+#![no_main]
+
+use libfuzzer_sys::fuzz_target;
+
+use grammar_aware::*;
+use solana_rbpf::{
+    ebpf,
+    elf::Executable,
+    insn_builder::{Arch, Instruction, IntoBytes},
+    memory_region::MemoryRegion,
+    program::{BuiltinProgram, FunctionRegistry, SBPFVersion},
+    verifier::{RequisiteVerifier, Verifier},
+    vm::TestContextObject,
+};
+use test_utils::create_vm;
+
+use crate::common::ConfigTemplate;
+
+mod common;
+mod grammar_aware;
+
+#[derive(arbitrary::Arbitrary, Debug)]
+struct FuzzData {
+    template: ConfigTemplate,
+    exit_dst: u8,
+    exit_src: u8,
+    exit_off: i16,
+    exit_imm: i64,
+    prog: FuzzProgram,
+    mem: Vec<u8>,
+}
+
+fuzz_target!(|data: FuzzData| {
+    let mut prog = make_program(&data.prog, Arch::X64);
+    prog.exit()
+        .set_dst(data.exit_dst)
+        .set_src(data.exit_src)
+        .set_off(data.exit_off)
+        .set_imm(data.exit_imm)
+        .push();
+    let config = data.template.into();
+    let function_registry = FunctionRegistry::default();
+    if RequisiteVerifier::verify(
+        prog.into_bytes(),
+        &config,
+        &SBPFVersion::V2,
+        &function_registry,
+    )
+    .is_err()
+    {
+        // The verifier rejected this program; skip the input
+        return;
+    }
+    let mut interp_mem = data.mem.clone();
+    let mut jit_mem = data.mem;
+    let mut executable = Executable::<TestContextObject>::from_text_bytes(
+        prog.into_bytes(),
+        std::sync::Arc::new(BuiltinProgram::new_loader(
+            config,
+            FunctionRegistry::default(),
+        )),
+        SBPFVersion::V2,
+        function_registry,
+    )
+    .unwrap();
+    if executable.jit_compile().is_ok() {
+        let mut interp_context_object = TestContextObject::new(1 << 16);
+        let interp_mem_region = MemoryRegion::new_writable(&mut interp_mem, ebpf::MM_INPUT_START);
+        create_vm!(
+            interp_vm,
+            &executable,
+            &mut interp_context_object,
+            interp_stack,
+            interp_heap,
+            vec![interp_mem_region],
+            None
+        );
+
+        let mut jit_context_object = TestContextObject::new(1 << 16);
+        let jit_mem_region = MemoryRegion::new_writable(&mut jit_mem, ebpf::MM_INPUT_START);
+        create_vm!(
+            jit_vm,
+            &executable,
+            &mut jit_context_object,
+            jit_stack,
+            jit_heap,
+            vec![jit_mem_region],
+            None
+        );
+
+        let (_interp_ins_count, interp_res) = interp_vm.execute_program(&executable, true);
+        let (_jit_ins_count, jit_res) = jit_vm.execute_program(&executable, false);
+        if format!("{:?}", interp_res) != format!("{:?}", jit_res) {
+            panic!("Expected {:?}, but got {:?}", interp_res, jit_res);
+        }
+        if interp_res.is_ok() {
+            // By this point the JIT result must also be Ok, since the Debug
+            // renderings matched
+            if interp_context_object.remaining != jit_context_object.remaining {
+                panic!(
+                    "Expected {} insts remaining, but got {}",
+                    interp_context_object.remaining, jit_context_object.remaining
+                );
+            }
+            if interp_mem != jit_mem {
+                panic!(
+                    "Interpreter and JIT memory diverged.\nFrom interpreter: {:?}\nFrom JIT: {:?}",
+                    interp_mem, jit_mem
+                );
+            }
+        }
+    }
+});
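Both jit-diff harnesses compare outcomes via Debug formatting rather than `==`, which works even when the error type does not implement PartialEq. A minimal standalone sketch of that comparison idiom, assuming only that both payload types implement Debug (same_outcome is an invented name, not part of the patch):

// Standalone sketch: compare two results by their Debug renderings,
// as the fuzz targets above do for interpreter vs. JIT outcomes.
fn same_outcome<T: std::fmt::Debug, E: std::fmt::Debug>(
    a: &Result<T, E>,
    b: &Result<T, E>,
) -> bool {
    format!("{:?}", a) == format!("{:?}", b)
}

fn main() {
    assert!(same_outcome::<u64, String>(&Ok(1), &Ok(1)));
    assert!(!same_outcome::<u64, String>(&Ok(1), &Err("trap".into())));
}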
diff --git a/rbpf/fuzz/fuzz_targets/smarter_jit_diff.rs b/rbpf/fuzz/fuzz_targets/smarter_jit_diff.rs
new file mode 100644
index 00000000000000..9f31e2c7d53af5
--- /dev/null
+++ b/rbpf/fuzz/fuzz_targets/smarter_jit_diff.rs
@@ -0,0 +1,121 @@
+#![no_main]
+
+use libfuzzer_sys::fuzz_target;
+
+use semantic_aware::*;
+use solana_rbpf::{
+    ebpf,
+    elf::Executable,
+    insn_builder::IntoBytes,
+    memory_region::MemoryRegion,
+    program::{BuiltinProgram, FunctionRegistry, SBPFVersion},
+    static_analysis::Analysis,
+    verifier::{RequisiteVerifier, Verifier},
+    vm::{ContextObject, TestContextObject},
+};
+use test_utils::create_vm;
+
+use crate::common::ConfigTemplate;
+
+mod common;
+mod semantic_aware;
+
+#[derive(arbitrary::Arbitrary, Debug)]
+struct FuzzData {
+    template: ConfigTemplate,
+    prog: FuzzProgram,
+    mem: Vec<u8>,
+}
+
+fn dump_insns(executable: &Executable<TestContextObject>) {
+    let analysis = Analysis::from_executable(executable).unwrap();
+    eprintln!("Using the following disassembly:");
+    analysis.disassemble(&mut std::io::stderr().lock()).unwrap();
+}
+
+fuzz_target!(|data: FuzzData| {
+    let prog = make_program(&data.prog);
+    let config = data.template.into();
+    let function_registry = FunctionRegistry::default();
+    if RequisiteVerifier::verify(
+        prog.into_bytes(),
+        &config,
+        &SBPFVersion::V2,
+        &function_registry,
+    )
+    .is_err()
+    {
+        // The verifier rejected this program; skip the input
+        return;
+    }
+    let mut interp_mem = data.mem.clone();
+    let mut jit_mem = data.mem;
+    let mut executable = Executable::<TestContextObject>::from_text_bytes(
+        prog.into_bytes(),
+        std::sync::Arc::new(BuiltinProgram::new_loader(
+            config,
+            FunctionRegistry::default(),
+        )),
+        SBPFVersion::V2,
+        function_registry,
+    )
+    .unwrap();
+    if executable.jit_compile().is_ok() {
+        let mut interp_context_object = TestContextObject::new(1 << 16);
+        let interp_mem_region = MemoryRegion::new_writable(&mut interp_mem, ebpf::MM_INPUT_START);
+        create_vm!(
+            interp_vm,
+            &executable,
+            &mut interp_context_object,
+            interp_stack,
+            interp_heap,
+            vec![interp_mem_region],
+            None
+        );
+
+        let mut jit_context_object = TestContextObject::new(1 << 16);
+        let jit_mem_region = MemoryRegion::new_writable(&mut jit_mem, ebpf::MM_INPUT_START);
+        create_vm!(
+            jit_vm,
+            &executable,
+            &mut jit_context_object,
+            jit_stack,
+            jit_heap,
+            vec![jit_mem_region],
+            None
+        );
+
+        let (_interp_ins_count, interp_res) = interp_vm.execute_program(&executable, true);
+        let (_jit_ins_count, jit_res) = jit_vm.execute_program(&executable, false);
+        let interp_res_str = format!("{:?}", interp_res);
+        let jit_res_str = format!("{:?}", jit_res);
+        if interp_res_str != jit_res_str {
+            // Spot check: ExceededMaxInstructions counts can legitimately differ
+            // between interpreter and JIT due to jump accounting, so a mismatch
+            // where both sides exceeded the limit is not treated as a bug
+            if interp_res_str.contains("ExceededMaxInstructions")
+                && jit_res_str.contains("ExceededMaxInstructions")
+            {
+                return;
+            }
+            eprintln!("{:#?}", &data.prog);
+            dump_insns(&executable);
+            panic!("Expected {}, but got {}", interp_res_str, jit_res_str);
+        }
+        if interp_res.is_ok() {
+            // By this point the JIT result must also be Ok, since the Debug
+            // renderings matched
+            if interp_context_object.remaining != jit_context_object.remaining {
+                dump_insns(&executable);
+                panic!(
+                    "Expected {} insts remaining, but got {}",
+                    interp_context_object.remaining, jit_context_object.remaining
+                );
+            }
+            if interp_mem != jit_mem {
+                dump_insns(&executable);
+                panic!(
+                    "Interpreter and JIT memory diverged.\nFrom interpreter: {:?}\nFrom JIT: {:?}",
+                    interp_mem, jit_mem
+                );
+            }
+        }
+    }
});
diff --git a/rbpf/fuzz/fuzz_targets/verify_semantic_aware.rs b/rbpf/fuzz/fuzz_targets/verify_semantic_aware.rs
new file mode 100644
index 00000000000000..c1e4e171eca74d
--- /dev/null
+++ b/rbpf/fuzz/fuzz_targets/verify_semantic_aware.rs
@@ -0,0 +1,34 @@
+#![no_main]
+
+use libfuzzer_sys::fuzz_target;
+
+use semantic_aware::*;
+use solana_rbpf::{
+    insn_builder::IntoBytes,
+    program::{FunctionRegistry, SBPFVersion},
+    verifier::{RequisiteVerifier, Verifier},
+};
+
+use crate::common::ConfigTemplate;
+
+mod common;
+mod semantic_aware;
+
+#[derive(arbitrary::Arbitrary, Debug)]
+struct FuzzData {
+    template: ConfigTemplate,
+    prog: FuzzProgram,
+}
+
+fuzz_target!(|data: FuzzData| {
+    let prog = make_program(&data.prog);
+    let config = data.template.into();
+    let function_registry = FunctionRegistry::default();
+    RequisiteVerifier::verify(
+        prog.into_bytes(),
+        &config,
+        &SBPFVersion::V2,
+        &function_registry,
+    )
+    .unwrap();
+});
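Every target's FuzzData is decoded from raw fuzzer bytes by the `arbitrary` crate, via the `#[derive(arbitrary::Arbitrary)]` seen above. A minimal standalone sketch of that mechanism, assuming the `arbitrary` crate with its "derive" feature; the `Example` type and its fields are invented for illustration:

// Standalone sketch: libFuzzer hands us a byte slice; `arbitrary` turns it
// into a structured value, which is what fuzz_target!(|data: FuzzData| ...)
// does behind the scenes.
use arbitrary::{Arbitrary, Unstructured};

#[derive(Arbitrary, Debug)]
struct Example {
    template_id: u8,
    payload: Vec<u8>,
}

fn decode(raw: &[u8]) -> arbitrary::Result<Example> {
    Example::arbitrary(&mut Unstructured::new(raw))
}

fn main() {
    println!("{:?}", decode(&[7, 3, 1, 2, 3]));
}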
diff --git a/rbpf/misc/rbpf.ico b/rbpf/misc/rbpf.ico
new file mode 100644
index 0000000000000000000000000000000000000000..6bb94a1c3e0bdeb109a02dba7cb51b05abd2fb7c
GIT binary patch
literal 1150
[base85-encoded binary image data omitted]

literal 0
HcmV?d00001

diff --git a/rbpf/misc/rbpf.png b/rbpf/misc/rbpf.png
new file mode 100644
index 0000000000000000000000000000000000000000..b3f07c4bd6e709af8033c371de2817d7da67b25e
GIT binary patch
literal 29765
[base85-encoded binary image data omitted]

literal 0
HcmV?d00001

diff --git a/rbpf/misc/rbpf_256.png b/rbpf/misc/rbpf_256.png
new file mode 100644
index 0000000000000000000000000000000000000000..b8ae83498d1be593b8b47bae090759dd80cec7be
GIT binary patch
literal 13059
[base85-encoded binary image data omitted]

literal 0
HcmV?d00001
#H(OwiKO!kBf}&8@fIdM%e@k*x|!*@4OR5jFyUN+1iOb zlXL)fYXT4>1gk20RLTQv5%7g5|GU-~9D}bg(;v7gp6|Yj{ayo~^MA2Y_~}}i2d!}P z0O=|pZ{b9q@8UTAB3>z2biV?7Ig$Hf?~cSfaJ}P1W>g!u0S7YxZ^nGriOh>VGg`UP z4N5e3HE?{AqOnc@S|{{)d_A=^@E*Qk=vGQFdea(MwFnrjIR~oQ-$D2)x#N`cSc>-p zm<8OXgkfCD=ALsRTXtn}&l>YuiPJxXuj5l@?Ta@f_Ej7>bG+X4Mv1@Erg2Opd8 ze!$MZhR;>8L5im4>fyGu5Yeg=fYu40f-h!T3D_^HCs4)yZUDSqq2?9<6T-q6T@1H7 z;LDd+0&a04hg7kz5y6^>FVtEI_^}hYxX_J;`quv(J}l#%#rSBwLus3caf=S<@A%jT zJ{S07nQi6pIpVr1e6bzxKCfI|1lx!Dl1f8xO2-|T=Cz~QU5g>Ue)_jk-CiNI z{#U#ycYyx%1+Go1nBB9!{oB}M<8pidBCjn1o{eeF@4{OVtjcbSszt1TJ@nG6PUM~7 zM0!f~6B{QqF(@|nrdYdOlDIpOs|wuvHOVI3lbp!=!MyyC)@;&W#g{g(7K{zJ^F4I) z<3y%dW3C{%U~CKUFDl z&qJL6B*WSi&-l|HxWO7T)*7>hHRf7iC%xAfUxH~!C2Q;jNIW~ZJjxn#rhcOzKGW>l z<;sNJ2udmPerJtYIhgZMiB`30N&3AB!KKTi>uTCPu0#`=f}iR-fMiO%1Ff3lyvmLX z^~B9GZ?^*%;Y~W9Wi=04V;b-#sZ~|H_)$i}{fTk~18EI=zR8Ik+RgDhowmtABUjmY zX!v7zb82G&5%5glCSZ={_$$eBCwQUu#E0u{3;d=4=jrfy4i8{?r=R9npBFii{&fP- z{spo!Ub8+Mcvg`xU6m{cmeJ(<`u-bu_uK#C9gk*3i^m$%TNONOo9Er@6dz6|-xcm>_0DCtIt?+?XW zi+0rC8UC(|J9- zqOX4iKJ$(q8ncvh?4!CFLpleZG;nyKA~0ESTekvahk-TJ*FH%~pzgZ)ON_mD1rNpH zR6qaYpp4^N4(dw4_7i|+%B$s9X;`KwO`HJKxxDmHhyX;P(kmKTs&N?A2|#v;N=|(Q zTmX!9B7-Pyp6Lka2ds(rOq{CMmQp)q&N~D2q%~Z)3Ai3#W$E-4+^?rqPjH^DyX82% zIsxc7_yOKbvnD=&-kW;;sTJ6#X-=fsiOh8(zjY$x@XEejX=}f(LveT>xDoFmI0QJ- zi9CgO`dPdJ`}Lp`c@a3>iL8!SUOtFdShkzu)ghbwxCD5ifTHDb+B%3d16S$^GOj|O z|3DY$VZa)}^{L@Q+P9aZzw3}1w=MXBA#s?Gw+4lCJ`Zoz`9cM|FavMWi|Y=s>J>?TB8DQb|OQa$TYovUx8PCj?b%D zh>%}|Pdm3HcKt0UGR283bRv6e8=nF*oyfFmjjc`qDhqo6hd7Z%G1lzY!83n$B2m)K zKhYXs62VV((Om}|A6$PKAITR6FP#nSq(LI}*{?1ftDM(iG12Ol*mD;G=Q@#_BQ`yN z7o5mXoXFpldkEtVP7T|2BIn|>OpU9Hay8B!SYy_<#=K;Wxz8HYCq$+-W*2KrUVqLC z-ihx2+!`~}8nbVGHaj0~_4lKSzZ(lY^S<6&YK<8id+#W{{(gOvcm2gONL_sk-TQ_$ z=G1tiQ6&U9YfNK(fplsDFwh#aVT8XJ0pHddv)CGQejyxMW5#H1#}$I#{|78TGMeWS RmQDZw002ovPDHLkV1g04Y0v-w literal 0 HcmV?d00001 diff --git a/rbpf/scripts/cargo-for-all-lock-files.sh b/rbpf/scripts/cargo-for-all-lock-files.sh new file mode 100755 index 00000000000000..6aefae127768c3 --- /dev/null +++ b/rbpf/scripts/cargo-for-all-lock-files.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +here="$(dirname "$0")" +cargo="$(readlink -f "${here}/../cargo")" + +set -e + +shifted_args=() +while [[ -n $1 ]]; do + if [[ $1 = -- ]]; then + escape_marker=found + shift + break + elif [[ $1 = "--ignore-exit-code" ]]; then + ignore=1 + shift + else + shifted_args+=("$1") + shift + fi +done + +# When "--" appear at the first and shifted_args is empty, consume it here +# to unambiguously pass and use any other "--" for cargo +if [[ -n $escape_marker && ${#shifted_args[@]} -gt 0 ]]; then + files="${shifted_args[*]}" + for file in $files; do + if [[ $file = "${file%Cargo.lock}" ]]; then + echo "$0: unrecognizable as Cargo.lock path (prepend \"--\"?): $file" >&2 + exit 1 + fi + done + shifted_args=() +else + files="$(git ls-files :**Cargo.lock)" +fi + +for lock_file in $files; do + if [[ -n $CI ]]; then + echo "--- [$lock_file]: cargo " "${shifted_args[@]}" "$@" + fi + + if (set -x && cd "$(dirname "$lock_file")" && cargo "${shifted_args[@]}" "$@"); then + # noop + true + else + failed_exit_code=$? 
diff --git a/rbpf/scripts/increment-cargo-version.sh b/rbpf/scripts/increment-cargo-version.sh
new file mode 100755
index 00000000000000..f9ca92a023738f
--- /dev/null
+++ b/rbpf/scripts/increment-cargo-version.sh
@@ -0,0 +1,155 @@
+#!/usr/bin/env bash
+set -e
+
+usage() {
+  cat </dev/null; then
+      badTomls+=("$Cargo_toml")
+    fi
+  done
+  if [[ ${#badTomls[@]} -ne 0 ]]; then
+    echo "Error: Incorrect crate version specified in: ${badTomls[*]}"
+    exit 1
+  fi
+  exit 0
+  ;;
+-*)
+  if [[ $1 =~ ^-[A-Za-z0-9]*$ ]]; then
+    SPECIAL="$1"
+  else
+    echo "Error: Unsupported characters found in $1"
+    exit 1
+  fi
+  ;;
+*)
+  echo "Error: unknown argument: $1"
+  usage
+  ;;
+esac
+
+# Version bumps should occur in their own commit. Disallow bumping version
+# in dirty working trees. Gate after arg parsing to prevent breaking the
+# `check` subcommand.
+(
+  set +e
+  if ! git diff --exit-code; then
+    echo -e "\nError: Working tree is dirty. Commit or discard changes before bumping version." 1>&2
+    exit 1
+  fi
+)
+
+newVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
+
+# Update all the Cargo.toml files
+for Cargo_toml in "${Cargo_tomls[@]}"; do
+  # Set new crate version
+  (
+    set -x
+    sed -i "$Cargo_toml" -e "0,/^version =/{s/^version = \"[^\"]*\"$/version = \"$newVersion\"/}"
+  )
+
+  # Fix up the version references to other internal crates
+  for crate in "${crates[@]}"; do
+    (
+      set -x
+      sed -i "$Cargo_toml" -e "
+        s/^$crate = { *path *= *\"\([^\"]*\)\" *, *version *= *\"[^\"]*\"\(.*\)} *\$/$crate = \{ path = \"\1\", version = \"=$newVersion\"\2\}/
+      "
+    )
+  done
+done
+
+# Update all the documentation references
+for file in "${markdownFiles[@]}"; do
+  # Set new crate version
+  (
+    set -x
+    sed -i "$file" -e "s/$currentVersion/$newVersion/g"
+  )
+done
+
+# Update cargo lock files
+scripts/cargo-for-all-lock-files.sh tree
+
+echo "$currentVersion -> $newVersion"
+
+exit 0
diff --git a/rbpf/scripts/read-cargo-variable.sh b/rbpf/scripts/read-cargo-variable.sh
new file mode 100644
index 00000000000000..7f6c95181560ff
--- /dev/null
+++ b/rbpf/scripts/read-cargo-variable.sh
@@ -0,0 +1,14 @@
+# source this file
+
+readCargoVariable() {
+  declare variable="$1"
+  declare Cargo_toml="$2"
+
+  while read -r name equals value _; do
+    if [[ $name = "$variable" && $equals = = ]]; then
+      echo "${value//\"/}"
+      return
+    fi
+  done <"$Cargo_toml"
+  echo "Unable to locate $variable in $Cargo_toml" 1>&2
+}
diff --git a/rbpf/scripts/semver.sh b/rbpf/scripts/semver.sh
new file mode 100755
index 00000000000000..e5237a4e2e5d97
--- /dev/null
+++ b/rbpf/scripts/semver.sh
@@ -0,0 +1,130 @@
+#!/usr/bin/env bash
+
+function semverParseInto() {
+    local RE='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)'
+    #MAJOR
+    eval $2=`echo $1 | sed -e "s#$RE#\1#"`
+    #MINOR
+    eval $3=`echo $1 | sed -e "s#$RE#\2#"`
+    #PATCH
+    eval $4=`echo $1 | sed -e "s#$RE#\3#"`
+    #SPECIAL
+    eval $5=`echo $1 | sed -e "s#$RE#\4#"`
+}
+
+function semverEQ() {
+    local MAJOR_A=0
+    local MINOR_A=0
+    local PATCH_A=0
+    local SPECIAL_A=0
+
+    local MAJOR_B=0
+    local MINOR_B=0
+    local PATCH_B=0
+    local SPECIAL_B=0
+
+    semverParseInto $1 MAJOR_A MINOR_A PATCH_A SPECIAL_A
+    semverParseInto $2 MAJOR_B MINOR_B PATCH_B SPECIAL_B
+
+    if [ $MAJOR_A -ne $MAJOR_B ]; then
+        return 1
+    fi
+
+    if [ $MINOR_A -ne $MINOR_B ]; then
+        return 1
+    fi
+
+    if [ $PATCH_A -ne $PATCH_B ]; then
+        return 1
+    fi
+
+    if
[[ "_$SPECIAL_A" != "_$SPECIAL_B" ]]; then + return 1 + fi + + + return 0 + +} + +function semverLT() { + local MAJOR_A=0 + local MINOR_A=0 + local PATCH_A=0 + local SPECIAL_A=0 + + local MAJOR_B=0 + local MINOR_B=0 + local PATCH_B=0 + local SPECIAL_B=0 + + semverParseInto $1 MAJOR_A MINOR_A PATCH_A SPECIAL_A + semverParseInto $2 MAJOR_B MINOR_B PATCH_B SPECIAL_B + + if [ $MAJOR_A -lt $MAJOR_B ]; then + return 0 + fi + + if [[ $MAJOR_A -le $MAJOR_B && $MINOR_A -lt $MINOR_B ]]; then + return 0 + fi + + if [[ $MAJOR_A -le $MAJOR_B && $MINOR_A -le $MINOR_B && $PATCH_A -lt $PATCH_B ]]; then + return 0 + fi + + if [[ "_$SPECIAL_A" == "_" ]] && [[ "_$SPECIAL_B" == "_" ]] ; then + return 1 + fi + if [[ "_$SPECIAL_A" == "_" ]] && [[ "_$SPECIAL_B" != "_" ]] ; then + return 1 + fi + if [[ "_$SPECIAL_A" != "_" ]] && [[ "_$SPECIAL_B" == "_" ]] ; then + return 0 + fi + + if [[ "_$SPECIAL_A" < "_$SPECIAL_B" ]]; then + return 0 + fi + + return 1 + +} + +function semverGT() { + semverEQ $1 $2 + local EQ=$? + + semverLT $1 $2 + local LT=$? + + if [ $EQ -ne 0 ] && [ $LT -ne 0 ]; then + return 0 + else + return 1 + fi +} + +if [ "___semver.sh" == "___`basename $0`" ]; then + +MAJOR=0 +MINOR=0 +PATCH=0 +SPECIAL="" + +semverParseInto $1 MAJOR MINOR PATCH SPECIAL +echo "$1 -> M: $MAJOR m:$MINOR p:$PATCH s:$SPECIAL" + +semverParseInto $2 MAJOR MINOR PATCH SPECIAL +echo "$2 -> M: $MAJOR m:$MINOR p:$PATCH s:$SPECIAL" + +semverEQ $1 $2 +echo "$1 == $2 -> $?." + +semverLT $1 $2 +echo "$1 < $2 -> $?." + +semverGT $1 $2 +echo "$1 > $2 -> $?." + +fi diff --git a/rbpf/src/aarch64.rs b/rbpf/src/aarch64.rs new file mode 100644 index 00000000000000..d6869d9a45b894 --- /dev/null +++ b/rbpf/src/aarch64.rs @@ -0,0 +1,1015 @@ +#![allow(clippy::arithmetic_side_effects)] +#![allow(clippy::upper_case_acronyms)] +#![allow(dead_code)] +use crate::{ + jit::{JitCompiler, OperandSize}, + vm::ContextObject, +}; + +macro_rules! exclude_operand_sizes { + ($size:expr, $($to_exclude:path)|+ $(,)?) 
diff --git a/rbpf/src/aarch64.rs b/rbpf/src/aarch64.rs
new file mode 100644
index 00000000000000..d6869d9a45b894
--- /dev/null
+++ b/rbpf/src/aarch64.rs
@@ -0,0 +1,1015 @@
+#![allow(clippy::arithmetic_side_effects)]
+#![allow(clippy::upper_case_acronyms)]
+#![allow(dead_code)]
+use crate::{
+    jit::{JitCompiler, OperandSize},
+    vm::ContextObject,
+};
+
+macro_rules! exclude_operand_sizes {
+    ($size:expr, $($to_exclude:path)|+ $(,)?) => {
+        debug_assert!(match $size {
+            $($to_exclude)|+ => false,
+            _ => true,
+        });
+    }
+}
+
+pub const X0: u8 = 0;
+pub const X1: u8 = 1;
+pub const X2: u8 = 2;
+pub const X3: u8 = 3;
+pub const X4: u8 = 4;
+pub const X5: u8 = 5;
+pub const X6: u8 = 6;
+pub const X7: u8 = 7;
+pub const X8: u8 = 8;
+pub const XR: u8 = X8;
+pub const X9: u8 = 9;
+pub const X10: u8 = 10;
+pub const X11: u8 = 11;
+pub const X12: u8 = 12;
+pub const X13: u8 = 13;
+pub const X14: u8 = 14;
+pub const X15: u8 = 15;
+
+// There are more registers, but I'm not sure we have a use for them
+// NOTE: x18 is reserved on Apple platforms
+
+pub const X19: u8 = 19;
+pub const X20: u8 = 20;
+pub const X21: u8 = 21;
+pub const X22: u8 = 22;
+pub const X23: u8 = 23;
+pub const X24: u8 = 24;
+pub const X25: u8 = 25;
+pub const X26: u8 = 26;
+pub const X27: u8 = 27;
+pub const X28: u8 = 28;
+pub const FP: u8 = 29; // NOTE: Apple's ABI requires FP to always address a valid frame record
+pub const LR: u8 = 30;
+pub const SP_XZR: u8 = 31; // SP or XZR, depending on context
+
+// Tested on Linux and macOS, but not on Windows
+pub const ARGUMENT_REGISTERS: [u8; 8] = [X0, X1, X2, X3, X4, X5, X6, X7];
+pub const CALLER_SAVED_REGISTERS: [u8; 7] = [X9, X10, X11, X12, X13, X14, X15];
+pub const CALLEE_SAVED_REGISTERS: [u8; 12] =
+    [X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, LR, FP];
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+#[repr(u8)]
+pub enum ShiftType {
+    LSL = 0, // logical shift left
+    // LSR = 1, // logical shift right
+    // ASR = 2, // arithmetic shift right
+    // ROR = 3, // rotate right
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+#[repr(u8)]
+pub enum Condition {
+    EQ = 0,  // equal
+    NE = 1,  // not equal
+    CS = 2,  // carry set, also HS (unsigned >=)
+    CC = 3,  // carry clear, also LO (unsigned <)
+    HI = 8,  // unsigned >
+    LS = 9,  // unsigned <=
+    GE = 10, // signed >=
+    LT = 11, // signed <
+    GT = 12, // signed >
+    LE = 13, // signed <=
+    // AL = 14, // always
+}
+
+impl Condition {
+    // Aliases
+    pub const HS: Condition = Condition::CS; // unsigned >=
+    pub const LO: Condition = Condition::CC; // unsigned <
+}
+
+pub struct ARM64BitwiseImm {
+    immr: u8, // 6 bits
+    imms: u8, // 6 bits
+    n: u8,    // 1 bit
+}
+
+impl ARM64BitwiseImm {
+    pub const ONE: ARM64BitwiseImm = ARM64BitwiseImm {
+        immr: 0,
+        imms: 0,
+        n: 1,
+    };
+}
+
+#[derive(PartialEq, Eq, Copy, Clone)]
+pub enum ARM64MemoryOperand {
+    #[allow(dead_code)]
+    OffsetScaled(u16), // ldr dst, [src, #offset] (unsigned offset, scaled)
+    Offset(i16), // ldur dst, [src, #offset] (signed offset, unscaled)
+    OffsetPreIndex(i16), // ldr dst, [src, #offset]! (signed offset, unscaled, autoincrement)
+    OffsetPostIndex(i16), // ldr dst, [src], #offset (signed offset, unscaled, autoincrement)
+    OffsetIndexShift(u8, bool), // ldr dst, [src, idx << 3]; u8 is the index register, bool selects the shift
+}
+
+// Instructions are broken up based on the encoding scheme used
+#[derive(Copy, Clone)]
+pub enum ARM64Instruction {
+    LogicalRegister(ARM64InstructionLogicalShiftedRegister),
+    AddSubRegister(ARM64InstructionLogicalShiftedRegister),
+    AddSubImm(ARM64InstructionAddSubImm),
+    ConditionalBranch(ARM64InstructionConditionalBranch),
+    LogicalImm(ARM64InstructionLogicalImm),
+    BitfieldImm(ARM64InstructionLogicalImm),
+    MovWideImm(ARM64InstructionWideImm),
+    DataProcessing1Src(ARM64InstructionDataProcessing),
+    DataProcessing2Src(ARM64InstructionDataProcessing),
+    DataProcessing3Src(ARM64InstructionDataProcessing),
+    BranchImm26(ARM64InstructionImm26),
+    BLR(ARM64InstructionBLR),
+    Load(ARM64InstructionLoadStore),
+    Store(ARM64InstructionLoadStore),
+    RET,
+}
+
+#[derive(Copy, Clone)]
+pub struct ARM64InstructionLogicalShiftedRegister {
+    pub size: OperandSize,
+    pub opcode: u8,            // 2 bits
+    pub n: u8,                 // negation (1 bit)
+    pub shift_type: ShiftType, // 2 bits
+    pub dest: u8,              // Rd, 5 bits
+    pub src1: u8,              // Rn, 5 bits
+    pub src2: u8,              // Rm, 5 bits
+    pub imm6: u8,              // shift amount (0-31 or 0-63)
+}
+
+impl Default for ARM64InstructionLogicalShiftedRegister {
+    fn default() -> Self {
+        Self {
+            size: OperandSize::S64,
+            opcode: 0,
+            n: 0,
+            shift_type: ShiftType::LSL,
+            dest: 0,
+            src1: 0,
+            src2: 0,
+            imm6: 0,
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct ARM64InstructionDataProcessing {
+    pub size: OperandSize,
+    pub opcode: u8, // 6 bits
+    pub dest: u8,   // Rd, 5 bits
+    pub src1: u8,   // Rn, 5 bits
+    pub src2: u8,   // Rm, 5 bits, only used in 2- and 3-src instructions
+    pub src3: u8,   // Ra, 5 bits, only used in 3-src instructions
+    pub o0: u8,     // 1 bit, only used in 3-src instructions
+}
+
+impl Default for ARM64InstructionDataProcessing {
+    fn default() -> Self {
+        Self {
+            size: OperandSize::S64,
+            opcode: 0,
+            dest: 0,
+            src1: 0,
+            src2: 0,
+            src3: 0,
+            o0: 0,
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct ARM64InstructionAddSubImm {
+    pub size: OperandSize,
+    pub opcode: u8,     // 1 bit
+    pub sets_flags: u8, // 1 bit
+    pub shift_mode: u8, // 00 (LSL 0) and 01 (LSL 12) are supported
+    pub dest: u8,       // Rd, 5 bits
+    pub src: u8,        // Rn, 5 bits
+    pub imm12: u16,     // unsigned imm12
+}
+
+impl Default for ARM64InstructionAddSubImm {
+    fn default() -> Self {
+        Self {
+            size: OperandSize::S64,
+            opcode: 0,
+            sets_flags: 0,
+            shift_mode: 0,
+            dest: 0,
+            src: 0,
+            imm12: 0,
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct ARM64InstructionLogicalImm {
+    pub size: OperandSize,
+    pub opcode: u8, // 2 bits
+    pub n: u8,      // negation (1 bit)
+    pub dest: u8,   // Rd, 5 bits
+    pub src: u8,    // Rn, 5 bits
+    pub immr: u8,   // imm6
+    pub imms: u8,   // imm6
+}
+
+impl Default for ARM64InstructionLogicalImm {
+    fn default() -> Self {
+        Self {
+            size: OperandSize::S64,
+            opcode: 0,
+            n: 0,
+            dest: 0,
+            src: 0,
+            immr: 0,
+            imms: 0,
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct ARM64InstructionWideImm {
+    pub size: OperandSize,
+    pub opcode: u8, // 2 bits
+    pub hw: u8,     // shift (0, 16, 32, 48), encoded as 2 bits
+    pub dest: u8,   // Rd, 5 bits
+    pub imm16: u16, // imm16
+}
+
+impl Default for ARM64InstructionWideImm {
+    fn default() -> Self {
+        Self {
+            size: OperandSize::S64,
+            opcode: 0,
+            hw: 0,
+            dest: 0,
+            imm16: 0,
+        }
+    }
+}
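+
+// Illustrative note (values invented, not from this patch): a 64-bit
+// constant can be materialized as up to four of these wide-immediate moves,
+// one per 16-bit chunk. MOVZ writes the hw=0 chunk and zeroes the rest, then
+// MOVK patches chunks hw=1..3 in place. For example, 0x1234_5678_9abc_def0
+// would take MOVZ(0xdef0, hw=0), MOVK(0x9abc, hw=1), MOVK(0x5678, hw=2),
+// MOVK(0x1234, hw=3).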
+
+#[derive(Copy, Clone)]
+pub struct ARM64InstructionConditionalBranch {
+    pub cond: u8,   // 4 bits
+    pub imm19: i32, // offset from current instruction, divided by 4
+}
+
+#[derive(Copy, Clone)]
+pub struct ARM64InstructionImm26 {
+    pub opcode: u8, // 6 bits
+    pub imm26: i32, // offset from current instruction, divided by 4
+}
+
+#[derive(Copy, Clone)]
+pub struct ARM64InstructionBLR {
+    pub target: u8, // 5-bit target register
+}
+
+// Load/Store
+
+#[derive(Copy, Clone)]
+pub struct ARM64InstructionLoadStore {
+    pub size: OperandSize,
+    pub data: u8, // Rt, 5 bits
+    pub base: u8, // Rn, 5 bits (base register)
+    pub mem: ARM64MemoryOperand,
+}
+
+impl Default for ARM64InstructionLoadStore {
+    fn default() -> Self {
+        Self {
+            size: OperandSize::S64,
+            data: 0,
+            base: 0,
+            mem: ARM64MemoryOperand::Offset(0), // default to an LDUR (no autoincrement)
+        }
+    }
+}
+
+impl ARM64Instruction {
+    pub fn emit<C: ContextObject>(&self, jit: &mut JitCompiler<C>) {
+        let mut ins: u32 = 0;
+
+        match self {
+            ARM64Instruction::LogicalRegister(s) | ARM64Instruction::AddSubRegister(s) => {
+                ins |= (s.dest & 0b11111) as u32;
+                ins |= ((s.src1 & 0b11111) as u32) << 5;
+                ins |= ((s.src2 & 0b11111) as u32) << 16;
+                ins |= ((s.imm6 & 0b111111) as u32) << 10;
+                ins |= ((s.n & 0b1) as u32) << 21;
+                ins |= (s.shift_type as u32) << 22;
+                ins |= ((s.opcode & 0b11) as u32) << 29;
+
+                match self {
+                    ARM64Instruction::LogicalRegister(_) => ins |= 0b01010u32 << 24,
+                    ARM64Instruction::AddSubRegister(_) => ins |= 0b01011u32 << 24,
+                    _ => unreachable!(),
+                };
+
+                let sf: u8 = match s.size {
+                    OperandSize::S64 => 1,
+                    _ => 0,
+                };
+
+                ins |= (sf as u32) << 31;
+            }
+            ARM64Instruction::DataProcessing2Src(s) | ARM64Instruction::DataProcessing1Src(s) => {
+                ins |= (s.dest & 0b11111) as u32;
+                ins |= ((s.src1 & 0b11111) as u32) << 5;
+                ins |= ((s.src2 & 0b11111) as u32) << 16;
+                ins |= ((s.opcode & 0b111111) as u32) << 10;
+                if let ARM64Instruction::DataProcessing1Src(_) = self {
+                    ins |= 0b1u32 << 30
+                }
+                ins |= 0b11010110u32 << 21;
+                let sf: u8 = match s.size {
+                    OperandSize::S64 => 1,
+                    _ => 0,
+                };
+
+                ins |= (sf as u32) << 31;
+            }
+            ARM64Instruction::DataProcessing3Src(s) => {
+                ins |= (s.dest & 0b11111) as u32;
+                ins |= ((s.src1 & 0b11111) as u32) << 5;
+                ins |= ((s.src2 & 0b11111) as u32) << 16;
+                ins |= ((s.src3 & 0b11111) as u32) << 10;
+                ins |= ((s.opcode & 0b111) as u32) << 21;
+                ins |= ((s.o0 & 0b1) as u32) << 15;
+
+                ins |= 0b11011u32 << 24;
+                let sf: u8 = match s.size {
+                    OperandSize::S64 => 1,
+                    _ => 0,
+                };
+
+                ins |= (sf as u32) << 31;
+            }
+            ARM64Instruction::AddSubImm(s) => {
+                ins |= (s.dest & 0b11111) as u32;
+                ins |= ((s.src & 0b11111) as u32) << 5;
+                ins |= ((s.imm12 & 0b111111111111) as u32) << 10;
+                ins |= (s.shift_mode as u32) << 22;
+                ins |= ((s.sets_flags & 0b1) as u32) << 29;
+                ins |= ((s.opcode & 0b1) as u32) << 30;
+
+                match self {
+                    ARM64Instruction::AddSubImm(_) => ins |= 0b10001u32 << 24,
+                    _ => unreachable!(),
+                };
+
+                let sf: u8 = match s.size {
+                    OperandSize::S64 => 1,
+                    _ => 0,
+                };
+
+                ins |= (sf as u32) << 31;
+            }
+            ARM64Instruction::LogicalImm(s) | ARM64Instruction::BitfieldImm(s) => {
+                ins |= (s.dest & 0b11111) as u32;
+                ins |= ((s.src & 0b11111) as u32) << 5;
+                ins |= ((s.imms & 0b111111) as u32) << 10;
+                ins |= ((s.immr & 0b111111) as u32) << 16;
+                ins |= ((s.n & 0b1) as u32) << 22;
+                ins |= ((s.opcode & 0b11) as u32) << 29;
+
+                match self {
+                    ARM64Instruction::LogicalImm(_) => ins |= 0b100100u32 << 23,
+                    ARM64Instruction::BitfieldImm(_) => ins |= 0b100110u32 << 23,
+                    _ => unreachable!(),
+                };
+
+                let sf: u8 = match s.size {
+                    OperandSize::S64 => 1,
+                    _ => 0,
+                };
+
+                ins |= (sf as u32) << 31;
+            }
+            ARM64Instruction::MovWideImm(s) => {
+                ins |= (s.dest & 0b11111) as u32;
+                ins |= (s.imm16 as u32) << 5;
+                ins |= ((s.hw & 0b11) as u32) << 21;
+                ins |= ((s.opcode & 0b11) as u32) << 29;
+
+                match self {
+                    ARM64Instruction::MovWideImm(_) => ins |= 0b100101u32 << 23,
+                    _ => unreachable!(),
+                };
+
+                let sf: u8 = match s.size {
+                    OperandSize::S64 => 1,
+                    _ => 0,
+                };
+
+                ins |= (sf as u32) << 31;
+            }
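+            // The two branch forms below encode PC-relative word offsets as
+            // two's-complement bitfields: a conditional branch gets 19 bits,
+            // an unconditional one 26. Masking with ((1 << 19) - 1) keeps the
+            // low bits of a negative offset; for example (illustrative value,
+            // not from this patch), imm19 = -1 encodes as 0x7ffff.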
u32) << 31; + } + ARM64Instruction::MovWideImm(s) => { + ins |= (s.dest & 0b11111) as u32; + ins |= (s.imm16 as u32) << 5; + ins |= ((s.hw & 0b11) as u32) << 21; + ins |= ((s.opcode & 0b11) as u32) << 29; + + match self { + ARM64Instruction::MovWideImm(_) => ins |= 0b100101u32 << 23, + _ => unreachable!(), + }; + + let sf: u8 = match s.size { + OperandSize::S64 => 1, + _ => 0, + }; + + ins |= (sf as u32) << 31; + } + ARM64Instruction::ConditionalBranch(s) => { + ins |= (s.cond & 0b1111) as u32; + ins |= ((s.imm19 as u32) & ((1u32 << 19) - 1u32)) << 5; + ins |= 0b01010100u32 << 24; + } + ARM64Instruction::BranchImm26(s) => { + ins |= (s.imm26 as u32) & ((1u32 << 26) - 1u32); + ins |= (s.opcode as u32) << 26; + } + ARM64Instruction::BLR(s) => { + ins |= 0b11010110001111110000000000000000u32; + ins |= ((s.target & 0b11111) as u32) << 5; + } + ARM64Instruction::RET => { + ins = 0xd65f03c0; + } + ARM64Instruction::Load(s) | ARM64Instruction::Store(s) => { + ins |= (s.data & 0b11111) as u32; + ins |= ((s.base & 0b11111) as u32) << 5; + let mode = match s.mem { + ARM64MemoryOperand::OffsetPreIndex(_) => 0b11, + ARM64MemoryOperand::OffsetPostIndex(_) => 0b01, + ARM64MemoryOperand::OffsetScaled(_) => 0b00, // spot used for imm12, + ARM64MemoryOperand::OffsetIndexShift(_, _) => 0b10, + ARM64MemoryOperand::Offset(_) => 0b00, + }; + ins |= (mode as u32) << 10; + + // Encode the memory operand + match s.mem { + ARM64MemoryOperand::OffsetPreIndex(imm9) + | ARM64MemoryOperand::OffsetPostIndex(imm9) + | ARM64MemoryOperand::Offset(imm9) => { + ins |= ((imm9 & 0b111111111) as u32) << 12; + } + ARM64MemoryOperand::OffsetScaled(imm12) => { + ins |= ((imm12 & 0b111111111111) as u32) << 10; + } + ARM64MemoryOperand::OffsetIndexShift(idx_reg, should_shift) => { + if should_shift { + ins |= 0b1u32 << 12; + } + ins |= 0b011u32 << 13; + ins |= ((idx_reg & 0b11111) as u32) << 16; + } + }; + + // Opcode (we choose the zero-extending version for all) + match s.mem { + ARM64MemoryOperand::OffsetPreIndex(_) + | ARM64MemoryOperand::OffsetPostIndex(_) + | ARM64MemoryOperand::Offset(_) => { + ins |= (if matches!(self, ARM64Instruction::Load(_)) { + 0b111000010u32 + } else { + 0b111000000 + }) << 21; + } + ARM64MemoryOperand::OffsetScaled(_) => { + ins |= (if matches!(self, ARM64Instruction::Load(_)) { + 0b11100101u32 + } else { + 0b11100100 + }) << 22; + } + ARM64MemoryOperand::OffsetIndexShift(_, _) => { + ins |= (if matches!(self, ARM64Instruction::Load(_)) { + 0b111000011u32 + } else { + 0b111000001 + }) << 21; + } + }; + + // Encode size + let size: u32 = match s.size { + OperandSize::S64 => 0b11, + OperandSize::S32 => 0b10, + OperandSize::S16 => 0b01, + OperandSize::S8 => 0b00, + OperandSize::S0 => panic!("bad operand size"), + }; + ins |= size << 30; + } + } + + jit.emit::(ins); + } + + /// Move source to destination + #[must_use] + pub fn mov(size: OperandSize, source: u8, destination: u8) -> Self { + // mov is same as ORR , XZR, + Self::LogicalRegister(ARM64InstructionLogicalShiftedRegister { + size, + opcode: 1, + n: 0, + dest: destination, + src1: SP_XZR, + src2: source, + ..ARM64InstructionLogicalShiftedRegister::default() + }) + } + + #[must_use] + pub fn orr(size: OperandSize, source: u8, destination: u8) -> Self { + Self::LogicalRegister(ARM64InstructionLogicalShiftedRegister { + size, + opcode: 1, + n: 0, + dest: destination, + src1: destination, + src2: source, + ..ARM64InstructionLogicalShiftedRegister::default() + }) + } + + #[must_use] + pub fn and(size: OperandSize, src1: u8, src2: u8, destination: u8) 
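+    // Worked example (illustrative, not in the original source): for
+    // mov(OperandSize::S64, 1, 0) below, emit() packs ORR x0, xzr, x1 as
+    // sf=1 opc=01 [01010] shift=00 N=0 Rm=1 imm6=0 Rn=31 Rd=0,
+    // i.e. the 32-bit word 0xAA0103E0.
+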
+    /// Move source to destination
+    #[must_use]
+    pub fn mov(size: OperandSize, source: u8, destination: u8) -> Self {
+        // mov is the same as ORR <dst>, XZR, <src>
+        Self::LogicalRegister(ARM64InstructionLogicalShiftedRegister {
+            size,
+            opcode: 1,
+            n: 0,
+            dest: destination,
+            src1: SP_XZR,
+            src2: source,
+            ..ARM64InstructionLogicalShiftedRegister::default()
+        })
+    }
+
+    #[must_use]
+    pub fn orr(size: OperandSize, source: u8, destination: u8) -> Self {
+        Self::LogicalRegister(ARM64InstructionLogicalShiftedRegister {
+            size,
+            opcode: 1,
+            n: 0,
+            dest: destination,
+            src1: destination,
+            src2: source,
+            ..ARM64InstructionLogicalShiftedRegister::default()
+        })
+    }
+
+    #[must_use]
+    pub fn and(size: OperandSize, src1: u8, src2: u8, destination: u8) -> Self {
+        Self::LogicalRegister(ARM64InstructionLogicalShiftedRegister {
+            size,
+            opcode: 0,
+            n: 0,
+            dest: destination,
+            src1,
+            src2,
+            ..ARM64InstructionLogicalShiftedRegister::default()
+        })
+    }
+
+    #[must_use]
+    pub fn eor(size: OperandSize, source: u8, destination: u8) -> Self {
+        Self::LogicalRegister(ARM64InstructionLogicalShiftedRegister {
+            size,
+            opcode: 2,
+            n: 0,
+            dest: destination,
+            src1: destination,
+            src2: source,
+            ..ARM64InstructionLogicalShiftedRegister::default()
+        })
+    }
+
+    #[must_use]
+    pub fn tst(size: OperandSize, source: u8, destination: u8) -> Self {
+        Self::LogicalRegister(ARM64InstructionLogicalShiftedRegister {
+            size,
+            opcode: 3,
+            n: 0,
+            dest: SP_XZR, // discard result
+            src1: destination,
+            src2: source,
+            ..ARM64InstructionLogicalShiftedRegister::default()
+        })
+    }
+
+    #[must_use]
+    pub fn tst_imm(source: u8, imm: ARM64BitwiseImm) -> Self {
+        Self::LogicalImm(ARM64InstructionLogicalImm {
+            size: OperandSize::S64,
+            opcode: 3,
+            n: imm.n,
+            immr: imm.immr,
+            imms: imm.imms,
+            dest: SP_XZR, // discard result
+            src: source,
+        })
+    }
+
+    // Here we implement the "shifted register" variant of ADD and SUB.
+    //
+    // There also exists an "extended register" variant, which includes a zero/sign-extend of the 2nd
+    // source register. Implementing this variant instead of using a separate instruction for
+    // the extension might have performance benefits for some BPF instructions.
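+    //
+    // For instance (illustrative sketch): an extended-register form such as
+    // `add x0, x1, w2, uxtw` would fold the zero-extension of the second
+    // operand into the addition, instead of emitting a separate
+    // zero_extend_to_u64() followed by a plain add().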
+    #[must_use]
+    pub fn add(size: OperandSize, src1: u8, src2: u8, destination: u8) -> Self {
+        Self::AddSubRegister(ARM64InstructionLogicalShiftedRegister {
+            size,
+            opcode: 0,
+            n: 0,
+            dest: destination,
+            src1,
+            src2,
+            ..ARM64InstructionLogicalShiftedRegister::default()
+        })
+    }
+
+    #[must_use]
+    pub fn add_imm(size: OperandSize, src: u8, imm12: u16, destination: u8) -> Self {
+        debug_assert!(imm12 < (1u16 << 12));
+        Self::AddSubImm(ARM64InstructionAddSubImm {
+            size,
+            opcode: 0,
+            dest: destination,
+            src,
+            imm12,
+            ..ARM64InstructionAddSubImm::default()
+        })
+    }
+
+    // destination = src1 - src2
+    #[must_use]
+    pub fn sub(size: OperandSize, src1: u8, src2: u8, destination: u8) -> Self {
+        Self::AddSubRegister(ARM64InstructionLogicalShiftedRegister {
+            size,
+            opcode: 2,
+            n: 0,
+            dest: destination,
+            src1,
+            src2,
+            ..ARM64InstructionLogicalShiftedRegister::default()
+        })
+    }
+
+    #[must_use]
+    pub fn sub_imm(size: OperandSize, src: u8, imm12: u16, destination: u8) -> Self {
+        debug_assert!(imm12 < (1u16 << 12));
+        Self::AddSubImm(ARM64InstructionAddSubImm {
+            size,
+            opcode: 1,
+            dest: destination,
+            src,
+            imm12,
+            ..ARM64InstructionAddSubImm::default()
+        })
+    }
+
+    // compare destination with source (subtract, set flags, discard the result)
+    #[must_use]
+    pub fn cmp(size: OperandSize, source: u8, destination: u8) -> Self {
+        Self::AddSubRegister(ARM64InstructionLogicalShiftedRegister {
+            size,
+            opcode: 3,
+            n: 0,
+            dest: SP_XZR,
+            src1: destination,
+            src2: source,
+            ..ARM64InstructionLogicalShiftedRegister::default()
+        })
+    }
+
+    #[must_use]
+    pub fn cmp_imm(size: OperandSize, src: u8, imm12: u16) -> Self {
+        debug_assert!(imm12 < (1u16 << 12));
+        Self::AddSubImm(ARM64InstructionAddSubImm {
+            size,
+            opcode: 1,
+            sets_flags: 1,
+            dest: SP_XZR,
+            src,
+            imm12,
+            ..ARM64InstructionAddSubImm::default()
+        })
+    }
+
+    #[must_use]
+    pub fn zero_extend_to_u64(from_size: OperandSize, source: u8, destination: u8) -> Self {
+        match from_size {
+            // UXTB
+            OperandSize::S8 => Self::BitfieldImm(ARM64InstructionLogicalImm {
+                size: OperandSize::S32,
+                opcode: 2,
+                immr: 0,
+                imms: 7,
+                dest: destination,
+                src: source,
+                ..ARM64InstructionLogicalImm::default()
+            }),
+            // UXTH
+            OperandSize::S16 => Self::BitfieldImm(ARM64InstructionLogicalImm {
+                size: OperandSize::S32,
+                opcode: 2,
+                immr: 0,
+                imms: 15,
+                dest: destination,
+                src: source,
+                ..ARM64InstructionLogicalImm::default()
+            }),
+            OperandSize::S32 => Self::mov(OperandSize::S32, source, destination), // 32-bit ops clear the upper bits
+            OperandSize::S0 | OperandSize::S64 => {
+                panic!("zero_extend is only valid on S8, S16, and S32")
+            }
+        }
+    }
+
+    #[must_use]
+    pub fn sign_extend_to_i64(from_size: OperandSize, source: u8, destination: u8) -> Self {
+        match from_size {
+            // SXTB
+            OperandSize::S8 => Self::BitfieldImm(ARM64InstructionLogicalImm {
+                size: OperandSize::S64,
+                n: 1,
+                opcode: 0,
+                immr: 0,
+                imms: 7,
+                dest: destination,
+                src: source,
+            }),
+            // SXTH
+            OperandSize::S16 => Self::BitfieldImm(ARM64InstructionLogicalImm {
+                size: OperandSize::S64,
+                n: 1,
+                opcode: 0,
+                immr: 0,
+                imms: 15,
+                dest: destination,
+                src: source,
+            }),
+            // SXTW
+            OperandSize::S32 => Self::BitfieldImm(ARM64InstructionLogicalImm {
+                size: OperandSize::S64,
+                n: 1,
+                opcode: 0,
+                immr: 0,
+                imms: 31,
+                dest: destination,
+                src: source,
+            }),
+            OperandSize::S0 | OperandSize::S64 => {
+                panic!("sign_extend is only valid on S8, S16, and S32")
+            }
+        }
+    }
+
+    #[must_use]
+    pub fn lsl_imm(source: u8, shift_imm: u8, destination: u8) -> Self {
+        debug_assert!(shift_imm > 0 && shift_imm < 64);
+        Self::BitfieldImm(ARM64InstructionLogicalImm {
+            size: OperandSize::S64,
+            n: 1,
+            opcode: 2,
+            immr: (-(shift_imm as i8)).rem_euclid(64) as u8,
+            imms: 63 - shift_imm,
+            dest: destination,
+            src: source,
+        })
+    }
+
+    #[must_use]
+    pub fn lsl_reg(size: OperandSize, src: u8, shift: u8, destination: u8) -> Self {
+        Self::DataProcessing2Src(ARM64InstructionDataProcessing {
+            size,
+            opcode: 0b001000,
+            dest: destination,
+            src1: src,
+            src2: shift,
+            ..ARM64InstructionDataProcessing::default()
+        })
+    }
+
+    #[must_use]
+    pub fn lsr_imm(source: u8, shift_imm: u8, destination: u8) -> Self {
+        debug_assert!(shift_imm > 0 && shift_imm < 64);
+        Self::BitfieldImm(ARM64InstructionLogicalImm {
+            size: OperandSize::S64,
+            n: 1,
+            opcode: 2,
+            immr: shift_imm,
+            imms: 0b111111,
+            dest: destination,
+            src: source,
+        })
+    }
+
+    #[must_use]
+    pub fn lsr_reg(size: OperandSize, src: u8, shift: u8, destination: u8) -> Self {
+        Self::DataProcessing2Src(ARM64InstructionDataProcessing {
+            size,
+            opcode: 0b001001,
+            dest: destination,
+            src1: src,
+            src2: shift,
+            ..ARM64InstructionDataProcessing::default()
+        })
+    }
+
+    #[must_use]
+    pub fn asr_reg(size: OperandSize, src: u8, shift: u8, destination: u8) -> Self {
+        Self::DataProcessing2Src(ARM64InstructionDataProcessing {
+            size,
+            opcode: 0b001010,
+            dest: destination,
+            src1: src,
+            src2: shift,
+            ..ARM64InstructionDataProcessing::default()
+        })
+    }
+
+    #[must_use]
+    pub fn rev(size: OperandSize, src: u8, destination: u8) -> Self {
+        Self::DataProcessing1Src(ARM64InstructionDataProcessing {
+            size,
+            opcode: match size {
+                OperandSize::S16 => 0b1,
+                OperandSize::S32 => 0b10,
+                OperandSize::S64 => 0b11,
+                _ => panic!("bad operand size for rev"),
+            },
+            dest: destination,
+            src1: src,
+            src2: 0,
+            ..ARM64InstructionDataProcessing::default()
+        })
+    }
+
+    // conditional branch
+    // WARNING: You need to divide the byte offset by 4 before passing as imm19
+    #[must_use]
+    pub fn b_cond(cond: Condition, imm19: i32) -> Self {
+        Self::ConditionalBranch(ARM64InstructionConditonalBranch {
+            cond: cond as u8,
+            imm19,
+        })
+    }
+
+    // call (direct, pc-relative)
+    #[must_use]
+    pub fn bl(imm26: i32) -> Self {
+        Self::BranchImm26(ARM64InstructionImm26 {
+            opcode: 0b100101,
+            imm26,
+        })
+    }
+
+    // call (indirect, via register)
+    #[must_use]
+    pub fn blr(target: u8) -> Self {
+        Self::BLR(ARM64InstructionBLR { target })
+    }
+
+    // jump
+    #[must_use]
+    pub fn b(imm26: i32) -> Self {
+        Self::BranchImm26(ARM64InstructionImm26 {
+            opcode: 0b000101,
+            imm26,
+        })
+    }
+
+    #[must_use]
+    pub fn ret() -> Self {
+        Self::RET
+    }
+
+    // movk (64-bit)
+    #[must_use]
+    pub fn movk(destination: u8, shift_16: u8, immediate: u16) -> Self {
+        debug_assert!((0..4).contains(&shift_16));
+        Self::MovWideImm(ARM64InstructionWideImm {
+            size: OperandSize::S64,
+            dest: destination,
+            hw: shift_16,
+            imm16: immediate,
+            opcode: 3,
+        })
+    }
+
+    // movn (64-bit)
+    #[must_use]
+    pub fn movn(destination: u8, shift_16: u8, immediate: u16) -> Self {
+        debug_assert!((0..4).contains(&shift_16));
+        Self::MovWideImm(ARM64InstructionWideImm {
+            size: OperandSize::S64,
+            dest: destination,
+            hw: shift_16,
+            imm16: immediate,
+            opcode: 0,
+        })
+    }
+
+    // mvn (bitwise NOT)
+    #[must_use]
+    pub fn mvn(size: OperandSize, source: u8, destination: u8) -> Self {
+        Self::LogicalRegister(ARM64InstructionLogicalShiftedRegister {
+            size,
+            opcode: 1,
+            n: 1,
+            dest: destination,
+            src1: SP_XZR,
+            src2: source,
+            ..ARM64InstructionLogicalShiftedRegister::default()
+        })
+    }
+
+    /// Load data from [source + offset]
+    #[must_use]
+    pub fn load(size: OperandSize, source: u8, indirect: ARM64MemoryOperand, data: u8) -> Self {
+        exclude_operand_sizes!(size, OperandSize::S0);
+        match indirect {
+            ARM64MemoryOperand::OffsetPreIndex(_) | ARM64MemoryOperand::OffsetPostIndex(_) => {
+                // in arm64, loads with writeback to the base register cannot also use this
+                // register as the dest
+                debug_assert_ne!(source, data);
+            }
+            _ => {}
+        }
+        Self::Load(ARM64InstructionLoadStore {
+            size,
+            data,
+            base: source,
+            mem: indirect,
+        })
+    }
+
+    /// Store data to [source + offset]
+    #[must_use]
+    pub fn store(size: OperandSize, data: u8, source: u8, indirect: ARM64MemoryOperand) -> Self {
+        exclude_operand_sizes!(size, OperandSize::S0);
+        match indirect {
+            ARM64MemoryOperand::OffsetPreIndex(_) | ARM64MemoryOperand::OffsetPostIndex(_) => {
+                // in arm64, stores with writeback to the base register cannot also use this
+                // register as the data source
+                debug_assert_ne!(source, data);
+            }
+            _ => {}
+        }
+        Self::Store(ARM64InstructionLoadStore {
+            size,
+            data,
+            base: source,
+            mem: indirect,
+        })
+    }
+
+    // Important: We have to maintain 16-byte SP alignment (enforced in hardware, at least on Apple
+    // platforms). To allow for minimal changes from the x86 code, we still want an 8-byte push,
+    // but the trade-off is that we have to allocate 16 bytes on the stack for every 8 byte push.
+    //
+    // A more efficient approach in many circumstances is to manually move SP down and load/store
+    // as desired.
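+    //
+    // A sketch of that approach (illustrative, register numbers hypothetical):
+    //     sub_imm(OperandSize::S64, SP_XZR, 32, SP_XZR)                               // sp -= 32
+    //     store(OperandSize::S64, reg_a, SP_XZR, ARM64MemoryOperand::OffsetScaled(0)) // [sp]
+    //     store(OperandSize::S64, reg_b, SP_XZR, ARM64MemoryOperand::OffsetScaled(1)) // [sp + 8]
+    //     ... matching loads later, then add_imm(OperandSize::S64, SP_XZR, 32, SP_XZR)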
+    #[must_use]
+    pub fn push64(reg: u8) -> Self {
+        debug_assert_ne!(SP_XZR, reg);
+        Self::Store(ARM64InstructionLoadStore {
+            size: OperandSize::S64,
+            data: reg,
+            base: SP_XZR, // this is SP in this context
+            mem: ARM64MemoryOperand::OffsetPreIndex(-16),
+        })
+    }
+
+    #[must_use]
+    pub fn pop64(reg: u8) -> Self {
+        debug_assert_ne!(SP_XZR, reg);
+        Self::Load(ARM64InstructionLoadStore {
+            size: OperandSize::S64,
+            data: reg,
+            base: SP_XZR, // this is SP in this context
+            mem: ARM64MemoryOperand::OffsetPostIndex(16),
+        })
+    }
+
+    // multiply-add
+    #[must_use]
+    pub fn madd(size: OperandSize, src1: u8, src2: u8, src3: u8, destination: u8) -> Self {
+        Self::DataProcessing3Src(ARM64InstructionDataProcessing {
+            size,
+            opcode: 0b000,
+            dest: destination,
+            src1,
+            src2,
+            src3,
+            ..ARM64InstructionDataProcessing::default()
+        })
+    }
+
+    // multiply-sub
+    #[must_use]
+    pub fn msub(size: OperandSize, src1: u8, src2: u8, src3: u8, destination: u8) -> Self {
+        Self::DataProcessing3Src(ARM64InstructionDataProcessing {
+            size,
+            opcode: 0b000,
+            dest: destination,
+            src1,
+            src2,
+            src3,
+            o0: 1,
+        })
+    }
+
+    #[must_use]
+    pub fn udiv(size: OperandSize, src1: u8, src2: u8, destination: u8) -> Self {
+        Self::DataProcessing2Src(ARM64InstructionDataProcessing {
+            size,
+            opcode: 0b000010,
+            dest: destination,
+            src1,
+            src2,
+            ..ARM64InstructionDataProcessing::default()
+        })
+    }
+
+    #[must_use]
+    pub fn sdiv(size: OperandSize, src1: u8, src2: u8, destination: u8) -> Self {
+        Self::DataProcessing2Src(ARM64InstructionDataProcessing {
+            size,
+            opcode: 0b000011,
+            dest: destination,
+            src1,
+            src2,
+            ..ARM64InstructionDataProcessing::default()
+        })
+    }
+}
diff --git a/rbpf/src/aligned_memory.rs b/rbpf/src/aligned_memory.rs
new file mode 100644
index 00000000000000..a9dcfe56e5e1d5
--- /dev/null
+++ b/rbpf/src/aligned_memory.rs
@@ -0,0 +1,296 @@
+//! Aligned memory
+
+use std::{mem, ptr};
+
+/// Scalar types, aka "plain old data"
+pub trait Pod {}
+
+impl Pod for bool {}
+impl Pod for u8 {}
+impl Pod for u16 {}
+impl Pod for u32 {}
+impl Pod for u64 {}
+impl Pod for i8 {}
+impl Pod for i16 {}
+impl Pod for i32 {}
+impl Pod for i64 {}
+
+/// Provides u8 slices at a specified alignment
+#[derive(Debug, PartialEq, Eq)]
+pub struct AlignedMemory<const ALIGN: usize> {
+    max_len: usize,
+    align_offset: usize,
+    mem: Vec<u8>,
+    zero_up_to_max_len: bool,
+}
+
+impl<const ALIGN: usize> AlignedMemory<ALIGN> {
+    fn get_mem(max_len: usize) -> (Vec<u8>, usize) {
+        let mut mem: Vec<u8> = Vec::with_capacity(max_len.saturating_add(ALIGN));
+        mem.push(0);
+        let align_offset = mem.as_ptr().align_offset(ALIGN);
+        mem.resize(align_offset, 0);
+        (mem, align_offset)
+    }
+    fn get_mem_zeroed(max_len: usize) -> (Vec<u8>, usize) {
+        // use calloc() to get zeroed memory from the OS instead of using
+        // malloc() + memset(), see
+        // https://github.com/rust-lang/rust/issues/54628
+        let mut mem = vec![0; max_len];
+        let align_offset = mem.as_ptr().align_offset(ALIGN);
+        mem.resize(max_len.saturating_add(align_offset), 0);
+        (mem, align_offset)
+    }
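+    // Illustrative example for the two helpers above: with ALIGN = 32 and a
+    // fresh allocation that happens to start at address 0x1008, align_offset(32)
+    // returns 0x18, so the aligned region begins at 0x1020; over-allocating by
+    // ALIGN bytes guarantees max_len bytes remain available past that offset.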
+    /// Returns a filled AlignedMemory by copying the given slice
+    pub fn from_slice(data: &[u8]) -> Self {
+        let max_len = data.len();
+        let (mut mem, align_offset) = Self::get_mem(max_len);
+        mem.extend_from_slice(data);
+        Self {
+            max_len,
+            align_offset,
+            mem,
+            zero_up_to_max_len: false,
+        }
+    }
+    /// Returns a new empty AlignedMemory with uninitialized preallocated memory
+    pub fn with_capacity(max_len: usize) -> Self {
+        let (mem, align_offset) = Self::get_mem(max_len);
+        Self {
+            max_len,
+            align_offset,
+            mem,
+            zero_up_to_max_len: false,
+        }
+    }
+    /// Returns a new empty AlignedMemory with zero initialized preallocated memory
+    pub fn with_capacity_zeroed(max_len: usize) -> Self {
+        let (mut mem, align_offset) = Self::get_mem_zeroed(max_len);
+        mem.truncate(align_offset);
+        Self {
+            max_len,
+            align_offset,
+            mem,
+            zero_up_to_max_len: true,
+        }
+    }
+    /// Returns a new filled AlignedMemory with zero initialized preallocated memory
+    pub fn zero_filled(max_len: usize) -> Self {
+        let (mem, align_offset) = Self::get_mem_zeroed(max_len);
+        Self {
+            max_len,
+            align_offset,
+            mem,
+            zero_up_to_max_len: true,
+        }
+    }
+    /// Calculate memory size
+    pub fn mem_size(&self) -> usize {
+        self.mem.capacity().saturating_add(mem::size_of::<Self>())
+    }
+    /// Get the length of the data
+    pub fn len(&self) -> usize {
+        self.mem.len().saturating_sub(self.align_offset)
+    }
+    /// Is the memory empty
+    pub fn is_empty(&self) -> bool {
+        self.mem.len() == self.align_offset
+    }
+    /// Get the current write index
+    pub fn write_index(&self) -> usize {
+        self.mem.len()
+    }
+    /// Get an aligned slice
+    pub fn as_slice(&self) -> &[u8] {
+        let start = self.align_offset;
+        let end = self.mem.len();
+        &self.mem[start..end]
+    }
+    /// Get an aligned mutable slice
+    pub fn as_slice_mut(&mut self) -> &mut [u8] {
+        let start = self.align_offset;
+        let end = self.mem.len();
+        &mut self.mem[start..end]
+    }
+    /// Grows memory with `value` repeated `num` times starting at the `write_index`
+    pub fn fill_write(&mut self, num: usize, value: u8) -> std::io::Result<()> {
+        let new_len = match (
+            self.mem.len().checked_add(num),
+            self.align_offset.checked_add(self.max_len),
+        ) {
+            (Some(new_len), Some(allocation_end)) if new_len <= allocation_end => new_len,
+            _ => {
+                return Err(std::io::Error::new(
+                    std::io::ErrorKind::InvalidInput,
+                    "aligned memory resize failed",
+                ))
+            }
+        };
+        if self.zero_up_to_max_len && value == 0 {
+            // Safe because everything up to `max_len` is zeroed and no shrinking is allowed
+            unsafe {
+                self.mem.set_len(new_len);
+            }
+        } else {
+            self.mem.resize(new_len, value);
+        }
+        Ok(())
+    }
+
+    /// Write a generic type T into the memory.
+    ///
+    /// # Safety
+    ///
+    /// Unsafe since it assumes that there is enough capacity.
+    pub unsafe fn write_unchecked<T: Pod>(&mut self, value: T) {
+        let pos = self.mem.len();
+        let new_len = pos.saturating_add(mem::size_of::<T>());
+        debug_assert!(new_len <= self.align_offset.saturating_add(self.max_len));
+        self.mem.set_len(new_len);
+        ptr::write_unaligned(
+            self.mem.get_unchecked_mut(pos..new_len).as_mut_ptr().cast(),
+            value,
+        );
+    }
+
+    /// Write a slice of bytes into the memory.
+    ///
+    /// # Safety
+    ///
+    /// Unsafe since it assumes that there is enough capacity.
+    pub unsafe fn write_all_unchecked(&mut self, value: &[u8]) {
+        let pos = self.mem.len();
+        let new_len = pos.saturating_add(value.len());
+        debug_assert!(new_len <= self.align_offset.saturating_add(self.max_len));
+        self.mem.set_len(new_len);
+        self.mem
+            .get_unchecked_mut(pos..new_len)
+            .copy_from_slice(value);
+    }
+}
+
+// Custom Clone impl is needed to ensure alignment. Derived clone would just
+// clone self.mem and there would be no guarantee that the clone allocation is
+// aligned.
+impl<const ALIGN: usize> Clone for AlignedMemory<ALIGN> {
+    fn clone(&self) -> Self {
+        AlignedMemory::from_slice(self.as_slice())
+    }
+}
+
+impl<const ALIGN: usize> std::io::Write for AlignedMemory<ALIGN> {
+    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+        match (
+            self.mem.len().checked_add(buf.len()),
+            self.align_offset.checked_add(self.max_len),
+        ) {
+            (Some(new_len), Some(allocation_end)) if new_len <= allocation_end => {}
+            _ => {
+                return Err(std::io::Error::new(
+                    std::io::ErrorKind::InvalidInput,
+                    "aligned memory write failed",
+                ))
+            }
+        }
+        self.mem.extend_from_slice(buf);
+        Ok(buf.len())
+    }
+    fn flush(&mut self) -> std::io::Result<()> {
+        Ok(())
+    }
+}
+
+impl<T: AsRef<[u8]>, const ALIGN: usize> From<T> for AlignedMemory<ALIGN> {
+    fn from(bytes: T) -> Self {
+        AlignedMemory::from_slice(bytes.as_ref())
+    }
+}
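+
+// Illustrative usage sketch (not part of the original file): a buffer built
+// from arbitrary bytes always hands out a slice at the requested alignment.
+//
+//     let mem = AlignedMemory::<16>::from_slice(&[1, 2, 3]);
+//     assert!(is_memory_aligned(mem.as_slice().as_ptr() as usize, 16));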
+
+/// Returns true if `ptr` is aligned to `align`.
+pub fn is_memory_aligned(ptr: usize, align: usize) -> bool {
+    ptr.checked_rem(align)
+        .map(|remainder| remainder == 0)
+        .unwrap_or(false)
+}
+
+#[cfg(test)]
+mod tests {
+    #![allow(clippy::arithmetic_side_effects)]
+    use {super::*, std::io::Write};
+
+    fn do_test<const ALIGN: usize>() {
+        let mut aligned_memory = AlignedMemory::<ALIGN>::with_capacity(10);
+
+        assert_eq!(aligned_memory.write(&[42u8; 1]).unwrap(), 1);
+        assert_eq!(aligned_memory.write(&[42u8; 9]).unwrap(), 9);
+        assert_eq!(aligned_memory.as_slice(), &[42u8; 10]);
+        assert_eq!(aligned_memory.write(&[42u8; 0]).unwrap(), 0);
+        assert_eq!(aligned_memory.as_slice(), &[42u8; 10]);
+        aligned_memory.write(&[42u8; 1]).unwrap_err();
+        assert_eq!(aligned_memory.as_slice(), &[42u8; 10]);
+        aligned_memory.as_slice_mut().copy_from_slice(&[84u8; 10]);
+        assert_eq!(aligned_memory.as_slice(), &[84u8; 10]);
+
+        let mut aligned_memory = AlignedMemory::<ALIGN>::with_capacity_zeroed(10);
+        aligned_memory.fill_write(5, 0).unwrap();
+        aligned_memory.fill_write(2, 1).unwrap();
+        assert_eq!(aligned_memory.write(&[2u8; 3]).unwrap(), 3);
+        assert_eq!(aligned_memory.as_slice(), &[0, 0, 0, 0, 0, 1, 1, 2, 2, 2]);
+        aligned_memory.fill_write(1, 3).unwrap_err();
+        aligned_memory.write(&[4u8; 1]).unwrap_err();
+        assert_eq!(aligned_memory.as_slice(), &[0, 0, 0, 0, 0, 1, 1, 2, 2, 2]);
+
+        let aligned_memory = AlignedMemory::<ALIGN>::zero_filled(10);
+        assert_eq!(aligned_memory.len(), 10);
+        assert_eq!(aligned_memory.as_slice(), &[0u8; 10]);
+
+        let mut aligned_memory = AlignedMemory::<ALIGN>::with_capacity_zeroed(15);
+        unsafe {
+            aligned_memory.write_unchecked::<u8>(42);
+            assert_eq!(aligned_memory.len(), 1);
+            aligned_memory.write_unchecked::<u64>(0xCAFEBADDDEADCAFE);
+            assert_eq!(aligned_memory.len(), 9);
+            aligned_memory.fill_write(3, 0).unwrap();
+            aligned_memory.write_all_unchecked(b"foo");
+            assert_eq!(aligned_memory.len(), 15);
+        }
+        let mem = aligned_memory.as_slice();
+        assert_eq!(mem[0], 42);
+        assert_eq!(
+            unsafe {
+                ptr::read_unaligned::<u64>(mem[1..1 + mem::size_of::<u64>()].as_ptr().cast())
+            },
+            0xCAFEBADDDEADCAFE
+        );
+        assert_eq!(&mem[1 + mem::size_of::<u64>()..][..3], &[0, 0, 0]);
+        assert_eq!(&mem[1 + mem::size_of::<u64>() + 3..], b"foo");
+    }
+
+    #[test]
+    fn test_aligned_memory() {
+        do_test::<1>();
+        do_test::<32768>();
+    }
+
+    #[cfg(debug_assertions)]
+    #[test]
+    #[should_panic(expected = "<= self.align_offset.saturating_add(self.max_len)")]
+    fn test_write_unchecked_debug_assert() {
+        let mut aligned_memory = AlignedMemory::<8>::with_capacity(15);
+        unsafe {
+            aligned_memory.write_unchecked::<u64>(42);
+            aligned_memory.write_unchecked::<u64>(24);
+        }
+    }
+
+    #[cfg(debug_assertions)]
+    #[test]
+    #[should_panic(expected = "<= self.align_offset.saturating_add(self.max_len)")]
+    fn test_write_all_unchecked_debug_assert() {
+        let mut aligned_memory = AlignedMemory::<8>::with_capacity(5);
+        unsafe {
+            aligned_memory.write_all_unchecked(b"foo");
+            aligned_memory.write_all_unchecked(b"bar");
+        }
+    }
+}
diff --git a/rbpf/src/asm_parser.rs b/rbpf/src/asm_parser.rs
new file mode 100644
index 00000000000000..b006b832999ed8
--- /dev/null
+++ b/rbpf/src/asm_parser.rs
@@ -0,0 +1,669 @@
+#![allow(clippy::arithmetic_side_effects)]
+// Copyright 2017 Rich Lane <lane@mit.edu>
+//
+// Licensed under the Apache License, Version 2.0 or
+// the MIT license, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+// Rust-doc comments were left in the module, but it is no longer publicly exposed from the root
+// file of the crate. Do not expect to find those comments in the documentation of the crate.
+
+//! This module parses eBPF assembly language source code.
+
+use combine::{
+    attempt, between,
+    char::{alpha_num, char, digit, hex_digit, spaces, string},
+    combine_parse_partial, combine_parser_impl,
+    easy::{Error, Errors, Info},
+    eof, many, many1, one_of, optional, parse_mode, parser, sep_by, skip_many,
+    stream::state::{SourcePosition, State},
+    Parser, Stream,
+};
+
+/// Operand of an instruction.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum Operand {
+    /// Register number.
+    Register(i64),
+    /// Jump offset or immediate.
+    Integer(i64),
+    /// Register number and offset.
+    Memory(i64, i64),
+    /// Jump target label.
+    Label(String),
+}
+
+/// Parsed statement.
+#[derive(Debug, PartialEq, Eq)]
+pub enum Statement {
+    /// Parsed label (name).
+    Label { name: String },
+    /// Parsed instruction (name, operands).
+    Instruction {
+        name: String,
+        operands: Vec<Operand>,
+    },
+}
+
+parser! {
+    fn ident[I]()(I) -> String where [I: Stream<Item = char>] {
+        many1(alpha_num().or(char('_')))
+    }
+}
+
+parser! {
+    fn mnemonic[I]()(I) -> String where [I: Stream<Item = char>] {
+        many1(alpha_num())
+    }
+}
+
+parser! {
+    fn integer[I]()(I) -> i64 where [I: Stream<Item = char>] {
+        let sign = optional(one_of("-+".chars())).map(|x| match x {
+            Some('-') => -1,
+            _ => 1,
+        });
+        let hex = string("0x")
+            .with(many1(hex_digit()))
+            .map(|x: String| u64::from_str_radix(&x, 16).unwrap_or(0) as i64);
+        let dec = many1(digit()).map(|x: String| x.parse::<i64>().unwrap_or(0));
+        (sign, attempt(hex).or(dec))
+            .map(|(s, x)| s * x)
+    }
+}
+
+parser! {
+    fn register[I]()(I) -> i64 where [I: Stream<Item = char>] {
+        char('r')
+            .with(many1(digit()))
+            .map(|x: String| x.parse::<i64>().unwrap_or(0))
+    }
+}
+
+parser! {
+    fn operand[I]()(I) -> Operand where [I: Stream<Item = char>] {
+        let register_operand = register().map(Operand::Register);
+        let immediate = integer().map(Operand::Integer);
+        let memory = between(
+            char('['),
+            char(']'),
+            (register(), optional(integer())),
+        )
+        .map(|t| Operand::Memory(t.0, t.1.unwrap_or(0)));
+        let label = ident().map(Operand::Label);
+        register_operand
+            .or(immediate)
+            .or(memory)
+            .or(label)
+    }
+}
+
+parser! {
+    fn label[I]()(I) -> Statement where [I: Stream<Item = char>] {
+        (ident(), char(':'))
+            .map(|t| Statement::Label { name: t.0 })
+    }
+}
+
+parser! {
+    fn instruction[I]()(I) -> Statement where [I: Stream<Item = char>] {
+        let operands = sep_by(operand(), char(',').skip(skip_many(char(' '))));
+        (mnemonic().skip(skip_many(char(' '))), operands)
+            .map(|t| Statement::Instruction { name: t.0, operands: t.1 })
+    }
+}
+
+fn format_info(info: &Info<char, &str>) -> String {
+    match *info {
+        Info::Token(x) => format!("{x:?}"),
+        Info::Range(x) => format!("{x:?}"),
+        Info::Owned(ref x) => x.clone(),
+        Info::Borrowed(x) => x.to_string(),
+    }
+}
+
+fn format_error(error: &Error<char, &str>) -> String {
+    match *error {
+        Error::Unexpected(ref x) => format!("unexpected {}", format_info(x)),
+        Error::Expected(ref x) => format!("expected {}", format_info(x)),
+        Error::Message(ref x) => format_info(x),
+        Error::Other(ref x) => format!("{x:?}"),
+    }
+}
+
+fn format_parse_error(parse_error: &Errors<char, &str, SourcePosition>) -> String {
+    format!(
+        "Parse error at line {} column {}: {}",
+        parse_error.position.line,
+        parse_error.position.column,
+        parse_error
+            .errors
+            .iter()
+            .map(format_error)
+            .collect::<Vec<String>>()
+            .join(", ")
+    )
+}
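+
+// Illustrative example (not in the original source) of what parse() below
+// returns for a two-statement input:
+//
+//     parse("entrypoint:\n  mov r1, 5") == Ok(vec![
+//         Statement::Label { name: "entrypoint".to_string() },
+//         Statement::Instruction {
+//             name: "mov".to_string(),
+//             operands: vec![Operand::Register(1), Operand::Integer(5)],
+//         },
+//     ])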
+
+/// Parse a string into a list of instructions.
+///
+/// The instructions are not validated and may have invalid names and operand types.
+pub fn parse(input: &str) -> Result<Vec<Statement>, String> {
+    match spaces()
+        .with(many(attempt(label()).or(instruction()).skip(spaces())))
+        .skip(eof())
+        .easy_parse(State::with_positioner(input, SourcePosition::default()))
+    {
+        Ok((insts, _)) => Ok(insts),
+        Err(err) => Err(format_parse_error(&err)),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{
+        ident, instruction, integer, mnemonic, operand, parse, register, Operand, Statement,
+    };
+    use combine::Parser;
+
+    // Unit tests for the different kinds of parsers.
+
+    #[test]
+    fn test_ident() {
+        assert_eq!(
+            ident().parse("entrypoint"),
+            Ok(("entrypoint".to_string(), ""))
+        );
+        assert_eq!(ident().parse("lbb_1"), Ok(("lbb_1".to_string(), "")));
+        assert_eq!(ident().parse("exit:"), Ok(("exit".to_string(), ":")));
+    }
+
+    #[test]
+    fn test_mnemonic() {
+        assert_eq!(mnemonic().parse("nop"), Ok(("nop".to_string(), "")));
+        assert_eq!(mnemonic().parse("add32"), Ok(("add32".to_string(), "")));
+        assert_eq!(mnemonic().parse("add32*"), Ok(("add32".to_string(), "*")));
+    }
+
+    #[test]
+    fn test_integer() {
+        assert_eq!(integer().parse("0"), Ok((0, "")));
+        assert_eq!(integer().parse("42"), Ok((42, "")));
+        assert_eq!(integer().parse("+42"), Ok((42, "")));
+        assert_eq!(integer().parse("-42"), Ok((-42, "")));
+        assert_eq!(integer().parse("0x0"), Ok((0, "")));
+        assert_eq!(
+            integer().parse("0x123456789abcdef0"),
+            Ok((0x123456789abcdef0, ""))
+        );
+        assert_eq!(integer().parse("-0x1f"), Ok((-31, "")));
+    }
+
+    #[test]
+    fn test_register() {
+        assert_eq!(register().parse("r0"), Ok((0, "")));
+        assert_eq!(register().parse("r15"), Ok((15, "")));
+    }
+
+    #[test]
+    fn test_operand() {
+        assert_eq!(operand().parse("r0"), Ok((Operand::Register(0), "")));
+        assert_eq!(operand().parse("r15"), Ok((Operand::Register(15), "")));
+        assert_eq!(operand().parse("0"), Ok((Operand::Integer(0), "")));
+        assert_eq!(operand().parse("42"), Ok((Operand::Integer(42), "")));
+        assert_eq!(operand().parse("[r1]"), Ok((Operand::Memory(1, 0), "")));
+        assert_eq!(operand().parse("[r3+5]"), Ok((Operand::Memory(3, 5), "")));
+        assert_eq!(
+            operand().parse("[r3+0x1f]"),
+            Ok((Operand::Memory(3, 31), ""))
+        );
+        assert_eq!(
+            operand().parse("[r3-0x1f]"),
+            Ok((Operand::Memory(3, -31), ""))
+        );
+    }
+
+    #[test]
+    fn test_instruction() {
+        assert_eq!(
+            instruction().parse("exit"),
+            Ok((
+                Statement::Instruction {
+                    name: "exit".to_string(),
+                    operands: vec![],
+                },
+                ""
+            ))
+        );
+
+        assert_eq!(
+            instruction().parse("call 2"),
+            Ok((
+                Statement::Instruction {
+                    name: "call".to_string(),
+                    operands: vec![Operand::Integer(2)],
+                },
+                ""
+            ))
+        );
+
+        assert_eq!(
+            instruction().parse("addi r1, 2"),
+            Ok((
+                Statement::Instruction {
+                    name: "addi".to_string(),
+                    operands: vec![Operand::Register(1), Operand::Integer(2)],
+                },
+                ""
+            ))
+        );
+
+        assert_eq!(
+            instruction().parse("ldxb r2, [r1+12]"),
+            Ok((
+                Statement::Instruction {
+                    name: "ldxb".to_string(),
+                    operands: vec![Operand::Register(2), Operand::Memory(1, 12)],
+                },
+                ""
+            ))
+        );
+
+        assert_eq!(
+            instruction().parse("lsh r3, 0x8"),
+            Ok((
+                Statement::Instruction {
+                    name: "lsh".to_string(),
+                    operands: vec![Operand::Register(3), Operand::Integer(8)],
+                },
+                ""
+            ))
+        );
+
+        assert_eq!(
+            instruction().parse("jne r3, 0x8, +37"),
+            Ok((
+                Statement::Instruction {
+                    name: "jne".to_string(),
+                    operands: vec![
+                        Operand::Register(3),
+                        Operand::Integer(8),
+                        Operand::Integer(37)
+                    ],
+                },
+                ""
+            ))
+        );
+
+        // Whitespace between operands is optional.
+        assert_eq!(
+            instruction().parse("jne r3,0x8,+37"),
+            Ok((
+                Statement::Instruction {
+                    name: "jne".to_string(),
+                    operands: vec![
+                        Operand::Register(3),
+                        Operand::Integer(8),
+                        Operand::Integer(37)
+                    ],
+                },
+                ""
+            ))
+        );
+    }
+
+    // Other unit tests: try to parse various set of instructions.
+
+    #[test]
+    fn test_empty() {
+        assert_eq!(parse(""), Ok(vec![]));
+    }
+
+    #[test]
+    fn test_exit() {
+        // No operands.
+        assert_eq!(
+            parse("exit"),
+            Ok(vec![Statement::Instruction {
+                name: "exit".to_string(),
+                operands: vec![],
+            }])
+        );
+    }
+
+    #[test]
+    fn test_lsh() {
+        // Register and immediate operands.
+        assert_eq!(
+            parse("lsh r3, 0x20"),
+            Ok(vec![Statement::Instruction {
+                name: "lsh".to_string(),
+                operands: vec![Operand::Register(3), Operand::Integer(0x20)],
+            }])
+        );
+    }
+
+    #[test]
+    fn test_ja() {
+        // Jump offset operand.
+        assert_eq!(
+            parse("ja +1"),
+            Ok(vec![Statement::Instruction {
+                name: "ja".to_string(),
+                operands: vec![Operand::Integer(1)],
+            }])
+        );
+    }
+
+    #[test]
+    fn test_ldxh() {
+        // Register and memory operands.
+        assert_eq!(
+            parse("ldxh r4, [r1+12]"),
+            Ok(vec![Statement::Instruction {
+                name: "ldxh".to_string(),
+                operands: vec![Operand::Register(4), Operand::Memory(1, 12)],
+            }])
+        );
+    }
+
+    #[test]
+    fn test_tcp_sack() {
+        // Sample program from ubpf.
+        // We could technically indent the instructions since the parser supports whitespace at
+        // the beginning, but there is another test for that.
+        let src = "\
+ldxb r2, [r1+12]
+ldxb r3, [r1+13]
+lsh r3, 0x8
+or r3, r2
+mov r0, 0x0
+jne r3, 0x8, +37
+ldxb r2, [r1+23]
+jne r2, 0x6, +35
+ldxb r2, [r1+14]
+add r1, 0xe
+and r2, 0xf
+lsh r2, 0x2
+add r1, r2
+mov r0, 0x0
+ldxh r4, [r1+12]
+add r1, 0x14
+rsh r4, 0x2
+and r4, 0x3c
+mov r2, r4
+add r2, 0xffffffec
+mov r5, 0x15
+mov r3, 0x0
+jgt r5, r4, +20
+mov r5, r3
+lsh r5, 0x20
+arsh r5, 0x20
+mov r4, r1
+add r4, r5
+ldxb r5, [r4]
+jeq r5, 0x1, +4
+jeq r5, 0x0, +12
+mov r6, r3
+jeq r5, 0x5, +9
+ja +2
+add r3, 0x1
+mov r6, r3
+ldxb r3, [r4+1]
+add r3, r6
+lsh r3, 0x20
+arsh r3, 0x20
+jsgt r2, r3, -18
+ja +1
+mov r0, 0x1
+exit
+";
+
+        assert_eq!(
+            parse(src),
+            Ok(vec![
+                Statement::Instruction {
+                    name: "ldxb".to_string(),
+                    operands: vec![Operand::Register(2), Operand::Memory(1, 12)],
+                },
+                Statement::Instruction {
+                    name: "ldxb".to_string(),
+                    operands: vec![Operand::Register(3), Operand::Memory(1, 13)],
+                },
+                Statement::Instruction {
+                    name: "lsh".to_string(),
+                    operands: vec![Operand::Register(3), Operand::Integer(8)],
+                },
+                Statement::Instruction {
+                    name: "or".to_string(),
+                    operands: vec![Operand::Register(3), Operand::Register(2)],
+                },
+                Statement::Instruction {
+                    name: "mov".to_string(),
+                    operands: vec![Operand::Register(0), Operand::Integer(0)],
+                },
+                Statement::Instruction {
+                    name: "jne".to_string(),
+                    operands: vec![
+                        Operand::Register(3),
+                        Operand::Integer(8),
+                        Operand::Integer(37)
+                    ],
+                },
+                Statement::Instruction {
+                    name: "ldxb".to_string(),
+                    operands: vec![Operand::Register(2), Operand::Memory(1, 23)],
+                },
+                Statement::Instruction {
+                    name: "jne".to_string(),
+                    operands: vec![
+                        Operand::Register(2),
+                        Operand::Integer(6),
+                        Operand::Integer(35)
+                    ],
+                },
+                Statement::Instruction {
+                    name: "ldxb".to_string(),
+                    operands: vec![Operand::Register(2), Operand::Memory(1, 14)],
+                },
+                Statement::Instruction {
+                    name: "add".to_string(),
+                    operands: vec![Operand::Register(1), Operand::Integer(14)],
+                },
+                Statement::Instruction {
+                    name: "and".to_string(),
+                    operands: vec![Operand::Register(2), Operand::Integer(15)],
+                },
+                Statement::Instruction {
+                    name: "lsh".to_string(),
+                    operands: vec![Operand::Register(2), Operand::Integer(2)],
+                },
+                Statement::Instruction {
+                    name: "add".to_string(),
+                    operands: vec![Operand::Register(1), Operand::Register(2)],
+                },
+                Statement::Instruction {
+                    name: "mov".to_string(),
+                    operands: vec![Operand::Register(0), Operand::Integer(0)],
+                },
+                Statement::Instruction {
+                    name: "ldxh".to_string(),
+                    operands: vec![Operand::Register(4), Operand::Memory(1, 12)],
+                },
+                Statement::Instruction {
+                    name: "add".to_string(),
+                    operands: vec![Operand::Register(1), Operand::Integer(20)],
+                },
+                Statement::Instruction {
+                    name: "rsh".to_string(),
+                    operands: vec![Operand::Register(4), Operand::Integer(2)],
+                },
+                Statement::Instruction {
+                    name: "and".to_string(),
+                    operands: vec![Operand::Register(4), Operand::Integer(60)],
+                },
+                Statement::Instruction {
+                    name: "mov".to_string(),
+                    operands: vec![Operand::Register(2), Operand::Register(4)],
+                },
+                Statement::Instruction {
+                    name: "add".to_string(),
+                    operands: vec![Operand::Register(2), Operand::Integer(4294967276)],
+                },
+                Statement::Instruction {
+                    name: "mov".to_string(),
+                    operands: vec![Operand::Register(5), Operand::Integer(21)],
+                },
+                Statement::Instruction {
+                    name: "mov".to_string(),
+                    operands: vec![Operand::Register(3), Operand::Integer(0)],
+                },
+                Statement::Instruction {
+                    name: "jgt".to_string(),
+                    operands: vec![
+                        Operand::Register(5),
+                        Operand::Register(4),
+                        Operand::Integer(20)
+                    ],
+                },
+                Statement::Instruction {
+                    name: "mov".to_string(),
+                    operands: vec![Operand::Register(5), Operand::Register(3)],
+                },
+                Statement::Instruction {
+                    name: "lsh".to_string(),
+                    operands: vec![Operand::Register(5), Operand::Integer(32)],
+                },
+                Statement::Instruction {
+                    name: "arsh".to_string(),
+                    operands: vec![Operand::Register(5), Operand::Integer(32)],
+                },
+                Statement::Instruction {
+                    name: "mov".to_string(),
+                    operands: vec![Operand::Register(4), Operand::Register(1)],
+                },
+                Statement::Instruction {
+                    name: "add".to_string(),
+                    operands: vec![Operand::Register(4), Operand::Register(5)],
+                },
+                Statement::Instruction {
+                    name: "ldxb".to_string(),
+                    operands: vec![Operand::Register(5), Operand::Memory(4, 0)],
+                },
+                Statement::Instruction {
+                    name: "jeq".to_string(),
+                    operands: vec![
+                        Operand::Register(5),
+                        Operand::Integer(1),
+                        Operand::Integer(4)
+                    ],
+                },
+                Statement::Instruction {
+                    name: "jeq".to_string(),
+                    operands: vec![
+                        Operand::Register(5),
+                        Operand::Integer(0),
+                        Operand::Integer(12)
+                    ],
+                },
+                Statement::Instruction {
+                    name: "mov".to_string(),
+                    operands: vec![Operand::Register(6), Operand::Register(3)],
+                },
+                Statement::Instruction {
+                    name: "jeq".to_string(),
+                    operands: vec![
+                        Operand::Register(5),
+                        Operand::Integer(5),
+                        Operand::Integer(9)
+                    ],
+                },
+                Statement::Instruction {
+                    name: "ja".to_string(),
+                    operands: vec![Operand::Integer(2)],
+                },
+                Statement::Instruction {
+                    name: "add".to_string(),
+                    operands: vec![Operand::Register(3), Operand::Integer(1)],
+                },
+                Statement::Instruction {
+                    name: "mov".to_string(),
+                    operands: vec![Operand::Register(6), Operand::Register(3)],
+                },
+                Statement::Instruction {
+                    name: "ldxb".to_string(),
+                    operands: vec![Operand::Register(3), Operand::Memory(4, 1)],
+                },
+                Statement::Instruction {
+                    name: "add".to_string(),
+                    operands: vec![Operand::Register(3), Operand::Register(6)],
+                },
+                Statement::Instruction {
+                    name: "lsh".to_string(),
+                    operands: vec![Operand::Register(3), Operand::Integer(32)],
+                },
+                Statement::Instruction {
+                    name: "arsh".to_string(),
+                    operands: vec![Operand::Register(3), Operand::Integer(32)],
+                },
+                Statement::Instruction {
+                    name: "jsgt".to_string(),
+                    operands: vec![
+                        Operand::Register(2),
+                        Operand::Register(3),
+                        Operand::Integer(-18)
+                    ],
+                },
+                Statement::Instruction {
+                    name: "ja".to_string(),
+                    operands: vec![Operand::Integer(1)],
+                },
+                Statement::Instruction {
+                    name: "mov".to_string(),
+                    operands: vec![Operand::Register(0), Operand::Integer(1)],
+                },
+                Statement::Instruction {
+                    name: "exit".to_string(),
+                    operands: vec![],
+                }
+            ])
+        );
+    }
+
+    #[test]
+    fn test_error_eof() {
+        // Unexpected end of input in a register name.
+        assert_eq!(
+            parse("lsh r"),
+            Err(
+                "Parse error at line 1 column 6: unexpected end of input, expected digit"
+                    .to_string()
+            )
+        );
+    }
+
+    #[test]
+    fn test_error_unexpected_character() {
+        // Unexpected character at end of input.
+        assert_eq!(
+            parse("exit\n^"),
+            Err(
+                "Parse error at line 2 column 1: unexpected '^', expected letter or digit, expected \'_\', expected whitespaces, expected end of input".to_string()
+            )
+        );
+    }
+
+    #[test]
+    fn test_initial_whitespace() {
+        assert_eq!(
+            parse(
+                " 
+                  exit"
+            ),
+            Ok(vec![Statement::Instruction {
+                name: "exit".to_string(),
+                operands: vec![],
+            }])
+        );
+    }
+}
diff --git a/rbpf/src/assembler.rs b/rbpf/src/assembler.rs
new file mode 100644
index 00000000000000..97e6721e08bf8f
--- /dev/null
+++ b/rbpf/src/assembler.rs
@@ -0,0 +1,446 @@
+#![allow(clippy::arithmetic_side_effects)]
+// Copyright 2017 Rich Lane <lane@mit.edu>
+//
+// Licensed under the Apache License, Version 2.0 or
+// the MIT license, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! This module translates eBPF assembly language to binary.
+
+use self::InstructionType::{
+    AluBinary, AluUnary, CallImm, CallReg, Endian, JumpConditional, JumpUnconditional, LoadAbs,
+    LoadDwImm, LoadInd, LoadReg, NoOperand, StoreImm, StoreReg, Syscall,
+};
+use crate::{
+    asm_parser::{
+        parse,
+        Operand::{Integer, Label, Memory, Register},
+        Statement,
+    },
+    ebpf::{self, Insn},
+    elf::Executable,
+    program::{BuiltinProgram, FunctionRegistry, SBPFVersion},
+    vm::ContextObject,
+};
+use std::{collections::HashMap, sync::Arc};
+
+#[derive(Clone, Copy, Debug, PartialEq)]
+enum InstructionType {
+    AluBinary,
+    AluUnary,
+    LoadDwImm,
+    LoadAbs,
+    LoadInd,
+    LoadReg,
+    StoreImm,
+    StoreReg,
+    JumpUnconditional,
+    JumpConditional,
+    Syscall,
+    CallImm,
+    CallReg,
+    Endian(i64),
+    NoOperand,
+}
+
+fn make_instruction_map() -> HashMap<String, (InstructionType, u8)> {
+    let mut result = HashMap::new();
+
+    let alu_binary_ops = [
+        ("add", ebpf::BPF_ADD),
+        ("sub", ebpf::BPF_SUB),
+        ("mul", ebpf::BPF_MUL),
+        ("div", ebpf::BPF_DIV),
+        ("or", ebpf::BPF_OR),
+        ("and", ebpf::BPF_AND),
+        ("lsh", ebpf::BPF_LSH),
+        ("rsh", ebpf::BPF_RSH),
+        ("mod", ebpf::BPF_MOD),
+        ("xor", ebpf::BPF_XOR),
+        ("mov", ebpf::BPF_MOV),
+        ("arsh", ebpf::BPF_ARSH),
+        ("hor", ebpf::BPF_HOR),
+    ];
+
+    let mem_sizes = [
+        ("w", ebpf::BPF_W),
+        ("h", ebpf::BPF_H),
+        ("b", ebpf::BPF_B),
+        ("dw", ebpf::BPF_DW),
+    ];
+
+    let jump_conditions = [
+        ("jeq", ebpf::BPF_JEQ),
+        ("jgt", ebpf::BPF_JGT),
+        ("jge", ebpf::BPF_JGE),
+        ("jlt", ebpf::BPF_JLT),
+        ("jle", ebpf::BPF_JLE),
+        ("jset", ebpf::BPF_JSET),
+        ("jne", ebpf::BPF_JNE),
+        ("jsgt", ebpf::BPF_JSGT),
+        ("jsge", ebpf::BPF_JSGE),
+        ("jslt", ebpf::BPF_JSLT),
+        ("jsle", ebpf::BPF_JSLE),
+    ];
+
+    {
+        let mut entry = |name: &str, inst_type: InstructionType, opc: u8| {
+            result.insert(name.to_string(), (inst_type, opc))
+        };
+
+        // Miscellaneous.
+ entry("exit", NoOperand, ebpf::EXIT); + entry("ja", JumpUnconditional, ebpf::JA); + entry("syscall", Syscall, ebpf::CALL_IMM); + entry("call", CallImm, ebpf::CALL_IMM); + entry("callx", CallReg, ebpf::CALL_REG); + entry("lddw", LoadDwImm, ebpf::LD_DW_IMM); + + // AluUnary. + entry("neg", AluUnary, ebpf::NEG64); + entry("neg32", AluUnary, ebpf::NEG32); + entry("neg64", AluUnary, ebpf::NEG64); + + // AluBinary. + for &(name, opc) in &alu_binary_ops { + entry(name, AluBinary, ebpf::BPF_ALU64 | opc); + entry(&format!("{name}32"), AluBinary, ebpf::BPF_ALU | opc); + entry(&format!("{name}64"), AluBinary, ebpf::BPF_ALU64 | opc); + } + + // Product Quotient Remainder. + entry( + "lmul", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_LMUL, + ); + entry( + "lmul64", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_LMUL, + ); + entry("lmul32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_LMUL); + entry( + "uhmul", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_UHMUL, + ); + entry( + "uhmul64", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_UHMUL, + ); + entry("uhmul32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_UHMUL); + entry( + "shmul", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_SHMUL, + ); + entry( + "shmul64", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_SHMUL, + ); + entry("shmul32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_SHMUL); + entry( + "udiv", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_UDIV, + ); + entry( + "udiv64", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_UDIV, + ); + entry("udiv32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_UDIV); + entry( + "urem", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_UREM, + ); + entry( + "urem64", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_UREM, + ); + entry("urem32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_UREM); + entry( + "sdiv", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_SDIV, + ); + entry( + "sdiv64", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_SDIV, + ); + entry("sdiv32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_SDIV); + entry( + "srem", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_SREM, + ); + entry( + "srem64", + AluBinary, + ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_SREM, + ); + entry("srem32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_SREM); + + // LoadAbs, LoadInd, LoadReg, StoreImm, and StoreReg. + for &(suffix, size) in &mem_sizes { + entry( + &format!("ldabs{suffix}"), + LoadAbs, + ebpf::BPF_ABS | ebpf::BPF_LD | size, + ); + entry( + &format!("ldind{suffix}"), + LoadInd, + ebpf::BPF_IND | ebpf::BPF_LD | size, + ); + entry( + &format!("ldx{suffix}"), + LoadReg, + ebpf::BPF_MEM | ebpf::BPF_LDX | size, + ); + entry( + &format!("st{suffix}"), + StoreImm, + ebpf::BPF_MEM | ebpf::BPF_ST | size, + ); + entry( + &format!("stx{suffix}"), + StoreReg, + ebpf::BPF_MEM | ebpf::BPF_STX | size, + ); + } + + // JumpConditional. + for &(name, condition) in &jump_conditions { + entry(name, JumpConditional, ebpf::BPF_JMP | condition); + } + + // Endian. 
+
+        // Product Quotient Remainder.
+        entry(
+            "lmul",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_LMUL,
+        );
+        entry(
+            "lmul64",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_LMUL,
+        );
+        entry("lmul32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_LMUL);
+        entry(
+            "uhmul",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_UHMUL,
+        );
+        entry(
+            "uhmul64",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_UHMUL,
+        );
+        entry("uhmul32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_UHMUL);
+        entry(
+            "shmul",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_SHMUL,
+        );
+        entry(
+            "shmul64",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_SHMUL,
+        );
+        entry("shmul32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_SHMUL);
+        entry(
+            "udiv",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_UDIV,
+        );
+        entry(
+            "udiv64",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_UDIV,
+        );
+        entry("udiv32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_UDIV);
+        entry(
+            "urem",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_UREM,
+        );
+        entry(
+            "urem64",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_UREM,
+        );
+        entry("urem32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_UREM);
+        entry(
+            "sdiv",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_SDIV,
+        );
+        entry(
+            "sdiv64",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_SDIV,
+        );
+        entry("sdiv32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_SDIV);
+        entry(
+            "srem",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_SREM,
+        );
+        entry(
+            "srem64",
+            AluBinary,
+            ebpf::BPF_PQR | ebpf::BPF_B | ebpf::BPF_SREM,
+        );
+        entry("srem32", AluBinary, ebpf::BPF_PQR | ebpf::BPF_SREM);
+
+        // LoadAbs, LoadInd, LoadReg, StoreImm, and StoreReg.
+        for &(suffix, size) in &mem_sizes {
+            entry(
+                &format!("ldabs{suffix}"),
+                LoadAbs,
+                ebpf::BPF_ABS | ebpf::BPF_LD | size,
+            );
+            entry(
+                &format!("ldind{suffix}"),
+                LoadInd,
+                ebpf::BPF_IND | ebpf::BPF_LD | size,
+            );
+            entry(
+                &format!("ldx{suffix}"),
+                LoadReg,
+                ebpf::BPF_MEM | ebpf::BPF_LDX | size,
+            );
+            entry(
+                &format!("st{suffix}"),
+                StoreImm,
+                ebpf::BPF_MEM | ebpf::BPF_ST | size,
+            );
+            entry(
+                &format!("stx{suffix}"),
+                StoreReg,
+                ebpf::BPF_MEM | ebpf::BPF_STX | size,
+            );
+        }
+
+        // JumpConditional.
+        for &(name, condition) in &jump_conditions {
+            entry(name, JumpConditional, ebpf::BPF_JMP | condition);
+        }
+
+        // Endian.
+        for &size in &[16, 32, 64] {
+            entry(&format!("be{size}"), Endian(size), ebpf::BE);
+            entry(&format!("le{size}"), Endian(size), ebpf::LE);
+        }
+    }
+
+    result
+}
+
+fn insn(opc: u8, dst: i64, src: i64, off: i64, imm: i64) -> Result<Insn, String> {
+    if !(0..16).contains(&dst) {
+        return Err(format!("Invalid destination register {dst}"));
+    }
+    if !(0..16).contains(&src) {
+        return Err(format!("Invalid source register {src}"));
+    }
+    if off < i16::MIN as i64 || off > i16::MAX as i64 {
+        return Err(format!("Invalid offset {off}"));
+    }
+    if imm < i32::MIN as i64 || imm > i32::MAX as i64 {
+        return Err(format!("Invalid immediate {imm}"));
+    }
+    Ok(Insn {
+        ptr: 0,
+        opc,
+        dst: dst as u8,
+        src: src as u8,
+        off: off as i16,
+        imm,
+    })
+}
+
+/// Parse assembly source and translate to binary.
+///
+/// # Examples
+///
+/// ```
+/// use solana_rbpf::{assembler::assemble, program::BuiltinProgram, vm::{Config, TestContextObject}};
+/// let executable = assemble::<TestContextObject>(
+///    "add64 r1, 0x605
+///     mov64 r2, 0x32
+///     mov64 r1, r0
+///     be16 r0
+///     neg64 r2
+///     exit",
+///   std::sync::Arc::new(BuiltinProgram::new_mock()),
+/// ).unwrap();
+/// let program = executable.get_text_bytes().1;
+/// println!("{:?}", program);
+/// # assert_eq!(program,
+/// #            &[0x07, 0x01, 0x00, 0x00, 0x05, 0x06, 0x00, 0x00,
+/// #              0xb7, 0x02, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00,
+/// #              0xbf, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/// #              0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+/// #              0x87, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/// #              0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]);
+/// ```
+///
+/// This will produce the following output:
+///
+/// ```text
+/// [0x07, 0x01, 0x00, 0x00, 0x05, 0x06, 0x00, 0x00,
+///  0xb7, 0x02, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00,
+///  0xbf, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+///  0xdc, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+///  0x87, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+///  0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
+/// ```
+pub fn assemble<C: ContextObject>(
+    src: &str,
+    loader: Arc<BuiltinProgram<C>>,
+) -> Result<Executable<C>, String> {
+    let sbpf_version = if loader.get_config().enable_sbpf_v2 {
+        SBPFVersion::V2
+    } else {
+        SBPFVersion::V1
+    };
+    fn resolve_label(
+        insn_ptr: usize,
+        labels: &HashMap<&str, usize>,
+        label: &str,
+    ) -> Result<i64, String> {
+        labels
+            .get(label)
+            .map(|target_pc| *target_pc as i64 - insn_ptr as i64 - 1)
+            .ok_or_else(|| format!("Label not found {label}"))
+    }
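+
+    // Example: a label bound to pc 10 that is referenced from the jump at pc 3
+    // resolves to an offset of 10 - 3 - 1 = 6, because eBPF branch offsets are
+    // counted in instructions from the instruction *after* the jump.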
+
+    let statements = parse(src)?;
+    let instruction_map = make_instruction_map();
+    let mut insn_ptr = 0;
+    let mut function_registry = FunctionRegistry::default();
+    let mut labels = HashMap::new();
+    labels.insert("entrypoint", 0);
+    for statement in statements.iter() {
+        match statement {
+            Statement::Label { name } => {
+                if name.starts_with("function_") || name == "entrypoint" {
+                    function_registry
+                        .register_function(insn_ptr as u32, name.as_bytes().to_vec(), insn_ptr)
+                        .map_err(|_| format!("Label hash collision {name}"))?;
+                }
+                labels.insert(name.as_str(), insn_ptr);
+            }
+            Statement::Instruction { name, .. } => {
+                insn_ptr += if name == "lddw" { 2 } else { 1 };
+            }
+        }
+    }
+    insn_ptr = 0;
+    let mut instructions: Vec<Insn> = Vec::new();
+    for statement in statements.iter() {
+        if let Statement::Instruction { name, operands } = statement {
+            let name = name.as_str();
+            match instruction_map.get(name) {
+                Some(&(inst_type, opc)) => {
+                    let mut insn = match (inst_type, operands.as_slice()) {
+                        (AluBinary, [Register(dst), Register(src)]) => {
+                            insn(opc | ebpf::BPF_X, *dst, *src, 0, 0)
+                        }
+                        (AluBinary, [Register(dst), Integer(imm)]) => {
+                            insn(opc | ebpf::BPF_K, *dst, 0, 0, *imm)
+                        }
+                        (AluUnary, [Register(dst)]) => insn(opc, *dst, 0, 0, 0),
+                        (LoadAbs, [Integer(imm)]) => insn(opc, 0, 0, 0, *imm),
+                        (LoadInd, [Register(src), Integer(imm)]) => insn(opc, 0, *src, 0, *imm),
+                        (LoadReg, [Register(dst), Memory(src, off)])
+                        | (StoreReg, [Memory(dst, off), Register(src)]) => {
+                            insn(opc, *dst, *src, *off, 0)
+                        }
+                        (StoreImm, [Memory(dst, off), Integer(imm)]) => {
+                            insn(opc, *dst, 0, *off, *imm)
+                        }
+                        (NoOperand, []) => insn(opc, 0, 0, 0, 0),
+                        (JumpUnconditional, [Integer(off)]) => insn(opc, 0, 0, *off, 0),
+                        (JumpConditional, [Register(dst), Register(src), Integer(off)]) => {
+                            insn(opc | ebpf::BPF_X, *dst, *src, *off, 0)
+                        }
+                        (JumpConditional, [Register(dst), Integer(imm), Integer(off)]) => {
+                            insn(opc | ebpf::BPF_K, *dst, 0, *off, *imm)
+                        }
+                        (JumpUnconditional, [Label(label)]) => {
+                            insn(opc, 0, 0, resolve_label(insn_ptr, &labels, label)?, 0)
+                        }
+                        (CallImm, [Integer(imm)]) => {
+                            let target_pc = *imm + insn_ptr as i64 + 1;
+                            let label = format!("function_{}", target_pc as usize);
+                            function_registry
+                                .register_function(
+                                    target_pc as u32,
+                                    label.as_bytes().to_vec(),
+                                    target_pc as usize,
+                                )
+                                .map_err(|_| format!("Label hash collision {name}"))?;
+                            insn(opc, 0, 1, 0, target_pc)
+                        }
+                        (CallReg, [Register(dst)]) => {
+                            if sbpf_version.callx_uses_src_reg() {
+                                insn(opc, 0, *dst, 0, 0)
+                            } else {
+                                insn(opc, 0, 0, 0, *dst)
+                            }
+                        }
+                        (JumpConditional, [Register(dst), Register(src), Label(label)]) => insn(
+                            opc | ebpf::BPF_X,
+                            *dst,
+                            *src,
+                            resolve_label(insn_ptr, &labels, label)?,
+                            0,
+                        ),
+                        (JumpConditional, [Register(dst), Integer(imm), Label(label)]) => insn(
+                            opc | ebpf::BPF_K,
+                            *dst,
+                            0,
+                            resolve_label(insn_ptr, &labels, label)?,
+                            *imm,
+                        ),
+                        (Syscall, [Label(label)]) => insn(
+                            opc,
+                            0,
+                            0,
+                            0,
+                            ebpf::hash_symbol_name(label.as_bytes()) as i32 as i64,
+                        ),
+                        (CallImm, [Label(label)]) => {
+                            let label: &str = label;
+                            let target_pc = *labels
+                                .get(label)
+                                .ok_or_else(|| format!("Label not found {label}"))?;
+                            insn(opc, 0, 1, 0, target_pc as i64)
+                        }
+                        (Endian(size), [Register(dst)]) => insn(opc, *dst, 0, 0, size),
+                        (LoadDwImm, [Register(dst), Integer(imm)]) => {
+                            insn(opc, *dst, 0, 0, (*imm << 32) >> 32)
+                        }
+                        _ => Err(format!("Unexpected operands: {operands:?}")),
+                    }?;
+                    insn.ptr = insn_ptr;
+                    instructions.push(insn);
+                    insn_ptr += 1;
+                    if let LoadDwImm = inst_type {
+                        if let Integer(imm) = operands[1] {
+                            instructions.push(Insn {
+                                ptr: insn_ptr,
+                                imm: imm >> 32,
+                                ..Insn::default()
+                            });
+                            insn_ptr += 1;
+                        }
+                    }
+                }
+                None => return Err(format!("Invalid instruction {name:?}")),
+            }
+        }
+    }
+    let program = instructions
+        .iter()
+        .flat_map(|insn| insn.to_vec())
+        .collect::<Vec<_>>();
+    Executable::<C>::from_text_bytes(&program, loader, sbpf_version, function_registry)
+        .map_err(|err| format!("Executable constructor {err:?}"))
+}
diff --git a/rbpf/src/debugger.rs b/rbpf/src/debugger.rs
new file mode 100644
index 00000000000000..81cfbe0b7f465f
--- /dev/null
+++ b/rbpf/src/debugger.rs
@@ -0,0 +1,549 @@
+//! Debugger for the virtual machines' interpreter.
+
+use std::net::{TcpListener, TcpStream};
+
+use gdbstub::common::Signal;
+use gdbstub::conn::ConnectionExt;
+use gdbstub::stub::{state_machine, GdbStub, SingleThreadStopReason};
+
+use gdbstub::arch::lldb::{Encoding, Format, Generic, Register};
+use gdbstub::arch::RegId;
+
+use gdbstub::target;
+use gdbstub::target::{Target, TargetError, TargetResult};
+
+use core::convert::TryInto;
+
+use bpf_arch::reg::id::BpfRegId;
+use bpf_arch::reg::BpfRegs;
+use bpf_arch::Bpf;
+use gdbstub::target::ext::base::singlethread::{SingleThreadBase, SingleThreadResume};
+use gdbstub::target::ext::lldb_register_info_override::{Callback, CallbackToken};
+use gdbstub::target::ext::section_offsets::Offsets;
+
+use crate::{
+    ebpf,
+    error::{EbpfError, ProgramResult},
+    interpreter::{DebugState, Interpreter},
+    memory_region::AccessType,
+    vm::ContextObject,
+};
+
+type DynResult<T> = Result<T, Box<dyn std::error::Error>>;
+
+fn wait_for_tcp(port: u16) -> DynResult<TcpStream> {
+    let sockaddr = format!("127.0.0.1:{}", port);
+    eprintln!("Waiting for a Debugger connection on {:?}...", sockaddr);
+
+    let sock = TcpListener::bind(sockaddr)?;
+    let (stream, addr) = sock.accept()?;
+    eprintln!("Debugger connected from {}", addr);
+
+    Ok(stream)
+}
+
+/// Connect to the debugger and hand over the control of the interpreter
+pub fn execute<C: ContextObject>(interpreter: &mut Interpreter<C>, port: u16) {
+    let connection: Box<dyn ConnectionExt<Error = std::io::Error>> =
+        Box::new(wait_for_tcp(port).expect("Cannot connect to Debugger"));
+    let mut dbg = GdbStub::new(connection)
+        .run_state_machine(interpreter)
+        .expect("Cannot start debugging state machine");
+    loop {
+        dbg = match dbg {
+            state_machine::GdbStubStateMachine::Idle(mut dbg_inner) => {
+                let byte = dbg_inner.borrow_conn().read().unwrap();
+                dbg_inner.incoming_data(interpreter, byte).unwrap()
+            }
+
+            state_machine::GdbStubStateMachine::Disconnected(_dbg_inner) => {
+                eprintln!("Client disconnected");
+                break;
+            }
+
+            state_machine::GdbStubStateMachine::CtrlCInterrupt(dbg_inner) => dbg_inner
+                .interrupt_handled(
+                    interpreter,
+                    Some(SingleThreadStopReason::Signal(Signal::SIGINT)),
+                )
+                .unwrap(),
+
+            state_machine::GdbStubStateMachine::Running(mut dbg_inner) => {
+                let conn = dbg_inner.borrow_conn();
+                match interpreter.debug_state {
+                    DebugState::Step => {
+                        let mut stop_reason = if interpreter.step() {
+                            SingleThreadStopReason::DoneStep
+                        } else if let ProgramResult::Ok(result) = &interpreter.vm.program_result {
+                            SingleThreadStopReason::Exited(*result as u8)
+                        } else {
+                            SingleThreadStopReason::Terminated(Signal::SIGSTOP)
+                        };
+                        if interpreter.breakpoints.contains(&interpreter.get_dbg_pc()) {
+                            stop_reason = SingleThreadStopReason::SwBreak(());
+                        }
+                        dbg_inner.report_stop(interpreter, stop_reason).unwrap()
+                    }
+                    DebugState::Continue => loop {
+                        if conn.peek().unwrap().is_some() {
+                            let byte = dbg_inner.borrow_conn().read().unwrap();
+                            break dbg_inner.incoming_data(interpreter, byte).unwrap();
+                        }
+                        if interpreter.step() {
+                            if interpreter.breakpoints.contains(&interpreter.get_dbg_pc()) {
+                                break dbg_inner
+                                    .report_stop(interpreter, SingleThreadStopReason::SwBreak(()))
+                                    .unwrap();
+                            }
+                        } else if let ProgramResult::Ok(result) = &interpreter.vm.program_result {
+                            break dbg_inner
+                                .report_stop(
+                                    interpreter,
+                                    SingleThreadStopReason::Exited(*result as u8),
+                                )
+                                .unwrap();
+                        } else {
+                            break dbg_inner
+                                .report_stop(
+                                    interpreter,
+                                    SingleThreadStopReason::Terminated(Signal::SIGSTOP),
+                                )
+                                .unwrap();
+                        }
+                    },
+                }
+            }
+        };
+    }
+}
+
+impl<'a, 'b, C: ContextObject> Target for Interpreter<'a, 'b, C> {
+    type Arch = Bpf;
+    type Error = &'static str;
+
+    #[inline(always)]
+    fn base_ops(&mut self) -> target::ext::base::BaseOps<'_, Self::Arch, Self::Error> {
+        target::ext::base::BaseOps::SingleThread(self)
+    }
+
+    #[inline(always)]
+    fn support_breakpoints(
+        &mut self,
+    ) -> Option<target::ext::breakpoints::BreakpointsOps<'_, Self>> {
+        Some(self)
+    }
+
+    #[inline(always)]
+    fn support_section_offsets(
+        &mut self,
+    ) -> Option<target::ext::section_offsets::SectionOffsetsOps<'_, Self>> {
+        Some(self)
+    }
+
+    #[inline(always)]
+    fn support_lldb_register_info_override(
+        &mut self,
+    ) -> Option<target::ext::lldb_register_info_override::LldbRegisterInfoOverrideOps<'_, Self>>
+    {
+        Some(self)
+    }
+}
+
+fn get_host_ptr<C: ContextObject>(
+    interpreter: &mut Interpreter<C>,
+    mut vm_addr: u64,
+) -> Result<*mut u8, EbpfError> {
+    if vm_addr < ebpf::MM_PROGRAM_START {
+        vm_addr += ebpf::MM_PROGRAM_START;
+    }
+    match interpreter.vm.memory_mapping.map(
+        AccessType::Load,
+        vm_addr,
+        std::mem::size_of::<u8>() as u64,
+    ) {
+        ProgramResult::Ok(host_addr) => Ok(host_addr as *mut u8),
+        ProgramResult::Err(err) => Err(err),
+    }
+}
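+
+// Illustrative example: with MM_PROGRAM_START = 0x1_0000_0000 (the text-region
+// base in this crate), a raw debugger request for address 0x100 is remapped to
+// 0x1_0000_0100 before the memory-mapping lookup, so addresses the client
+// derives from section offsets still resolve.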
+
+impl<'a, 'b, C: ContextObject> SingleThreadBase for Interpreter<'a, 'b, C> {
+    fn read_registers(&mut self, regs: &mut BpfRegs) -> TargetResult<(), Self> {
+        for i in 0..10 {
+            regs.r[i] = self.reg[i];
+        }
+        regs.sp = self.reg[ebpf::FRAME_PTR_REG];
+        regs.pc = self.get_dbg_pc();
+        Ok(())
+    }
+
+    fn write_registers(&mut self, regs: &BpfRegs) -> TargetResult<(), Self> {
+        for i in 0..10 {
+            self.reg[i] = regs.r[i];
+        }
+        self.reg[ebpf::FRAME_PTR_REG] = regs.sp;
+        self.reg[11] = regs.pc;
+        Ok(())
+    }
+
+    #[inline(always)]
+    fn support_single_register_access(
+        &mut self,
+    ) -> Option<target::ext::base::single_register_access::SingleRegisterAccessOps<'_, (), Self>>
+    {
+        Some(self)
+    }
+
+    fn read_addrs(&mut self, start_addr: u64, data: &mut [u8]) -> TargetResult<(), Self> {
+        for (vm_addr, val) in (start_addr..).zip(data.iter_mut()) {
+            let host_ptr = match get_host_ptr(self, vm_addr) {
+                Ok(host_ptr) => host_ptr,
+                // The debugger is sometimes requesting more data than we have access to, just skip these
+                _ => continue,
+            };
+            *val = unsafe { *host_ptr as u8 };
+        }
+        Ok(())
+    }
+
+    fn write_addrs(&mut self, start_addr: u64, data: &[u8]) -> TargetResult<(), Self> {
+        for (_addr, _val) in (start_addr..).zip(data.iter().copied()) {
+            eprintln!("Memory write not supported");
+        }
+        Ok(())
+    }
+
+    #[inline(always)]
+    fn support_resume(
+        &mut self,
+    ) -> Option<target::ext::base::singlethread::SingleThreadResumeOps<'_, Self>> {
+        Some(self)
+    }
+}
+
+impl<'a, 'b, C: ContextObject> target::ext::base::single_register_access::SingleRegisterAccess<()>
+    for Interpreter<'a, 'b, C>
+{
+    fn read_register(
+        &mut self,
+        _tid: (),
+        reg_id: BpfRegId,
+        buf: &mut [u8],
+    ) -> TargetResult<usize, Self> {
+        match reg_id {
+            BpfRegId::Gpr(i) => {
+                let r = self.reg[i as usize];
+                buf.copy_from_slice(&r.to_le_bytes());
+            }
+            BpfRegId::Sp => buf.copy_from_slice(&self.reg[ebpf::FRAME_PTR_REG].to_le_bytes()),
+            BpfRegId::Pc => buf.copy_from_slice(&self.get_dbg_pc().to_le_bytes()),
+            BpfRegId::InstructionCountRemaining => {
+                buf.copy_from_slice(&self.vm.context_object_pointer.get_remaining().to_le_bytes())
+            }
+        }
+        Ok(buf.len())
+    }
+
+    fn write_register(&mut self, _tid: (), reg_id: BpfRegId, val: &[u8]) -> TargetResult<(), Self> {
+        let r = u64::from_le_bytes(
+            val.try_into()
+                .map_err(|_| TargetError::Fatal("invalid data"))?,
+        );
+
+        match reg_id {
+            BpfRegId::Gpr(i) => self.reg[i as usize] = r,
+            BpfRegId::Sp => self.reg[ebpf::FRAME_PTR_REG] = r,
+            BpfRegId::Pc => self.reg[11] = r,
+            BpfRegId::InstructionCountRemaining => (),
+        }
+        Ok(())
+    }
+}
+
+impl<'a, 'b, C: ContextObject> SingleThreadResume for Interpreter<'a, 'b, C> {
+    fn resume(&mut self, signal: Option<Signal>) -> Result<(), Self::Error> {
signal.is_some() { + return Err("no support for continuing with signal"); + } + + self.debug_state = DebugState::Continue; + + Ok(()) + } + + #[inline(always)] + fn support_single_step( + &mut self, + ) -> Option> { + Some(self) + } +} + +impl<'a, 'b, C: ContextObject> target::ext::base::singlethread::SingleThreadSingleStep + for Interpreter<'a, 'b, C> +{ + fn step(&mut self, signal: Option) -> Result<(), Self::Error> { + if signal.is_some() { + return Err("no support for stepping with signal"); + } + + self.debug_state = DebugState::Step; + + Ok(()) + } +} + +impl<'a, 'b, C: ContextObject> target::ext::section_offsets::SectionOffsets + for Interpreter<'a, 'b, C> +{ + fn get_section_offsets(&mut self) -> Result, Self::Error> { + Ok(Offsets::Sections { + text: 0, + data: 0, + bss: None, + }) + } +} + +impl<'a, 'b, C: ContextObject> target::ext::breakpoints::Breakpoints for Interpreter<'a, 'b, C> { + #[inline(always)] + fn support_sw_breakpoint( + &mut self, + ) -> Option> { + Some(self) + } +} + +impl<'a, 'b, C: ContextObject> target::ext::breakpoints::SwBreakpoint for Interpreter<'a, 'b, C> { + fn add_sw_breakpoint( + &mut self, + addr: u64, + _kind: bpf_arch::BpfBreakpointKind, + ) -> TargetResult { + self.breakpoints.push(addr); + + Ok(true) + } + + fn remove_sw_breakpoint( + &mut self, + addr: u64, + _kind: bpf_arch::BpfBreakpointKind, + ) -> TargetResult { + match self.breakpoints.iter().position(|x| *x == addr) { + None => return Ok(false), + Some(pos) => self.breakpoints.remove(pos), + }; + + Ok(true) + } +} + +impl<'a, 'b, C: ContextObject> target::ext::lldb_register_info_override::LldbRegisterInfoOverride + for Interpreter<'a, 'b, C> +{ + fn lldb_register_info<'c>( + &mut self, + reg_id: usize, + reg_info: Callback<'c>, + ) -> Result, Self::Error> { + match BpfRegId::from_raw_id(reg_id) { + Some((_, None)) | None => Ok(reg_info.done()), + Some((r, Some(size))) => { + let name: String = match r { + BpfRegId::Gpr(i) => match i { + 0 => "r0", + 1 => "r1", + 2 => "r2", + 3 => "r3", + 4 => "r4", + 5 => "r5", + 6 => "r6", + 7 => "r7", + 8 => "r8", + 9 => "r9", + _ => "unknown", + }, + BpfRegId::Sp => "sp", + BpfRegId::Pc => "pc", + BpfRegId::InstructionCountRemaining => "remaining", + } + .into(); + let set = String::from("General Purpose Registers"); + let generic = match r { + BpfRegId::Sp => Some(Generic::Sp), + BpfRegId::Pc => Some(Generic::Pc), + _ => None, + }; + let reg = Register { + name: &name, + alt_name: None, + bitsize: (usize::from(size)) * 8, + offset: reg_id * (usize::from(size)), + encoding: Encoding::Uint, + format: Format::Hex, + set: &set, + gcc: None, + dwarf: Some(reg_id), + generic, + container_regs: None, + invalidate_regs: None, + }; + Ok(reg_info.write(reg)) + } + } + } +} + +mod bpf_arch { + use gdbstub::arch::{Arch, SingleStepGdbBehavior}; + + /// BPF-specific breakpoint kinds. + /// + /// Extracted from the GDB source code [BPF Breakpoint Kinds](https://github.com/bminor/binutils-gdb/blob/9e0f6329352ab9c5e2f278181a3875918cff3b27/gdb/bpf-tdep.c#L205) + #[derive(Debug)] + pub enum BpfBreakpointKind { + /// BPF breakpoint + BpfBpKindBrkpt, + } + + impl gdbstub::arch::BreakpointKind for BpfBreakpointKind { + fn from_usize(kind: usize) -> Option { + let kind = match kind { + 0 => BpfBreakpointKind::BpfBpKindBrkpt, + _ => return None, + }; + Some(kind) + } + } + + /// Implements `Arch` for BPF. 
+ pub enum Bpf {} + + #[allow(deprecated)] + impl Arch for Bpf { + type Usize = u64; + type Registers = reg::BpfRegs; + type RegId = reg::id::BpfRegId; + type BreakpointKind = BpfBreakpointKind; + + #[inline(always)] + fn single_step_gdb_behavior() -> SingleStepGdbBehavior { + SingleStepGdbBehavior::Required + } + } + + pub mod reg { + pub use bpf::BpfRegs; + + mod bpf { + use core::convert::TryInto; + + use gdbstub::arch::Registers; + + /// BPF registers. + /// + /// Source: + #[derive(Debug, Default, Clone, Eq, PartialEq)] + pub struct BpfRegs { + /// General purpose registers (R0-R9) + pub r: [u64; 10], + /// Stack pointer (R10) + pub sp: u64, + /// Program counter (R11) + pub pc: u64, + } + + impl Registers for BpfRegs { + type ProgramCounter = u64; + + fn pc(&self) -> Self::ProgramCounter { + self.pc + } + + fn gdb_serialize(&self, mut write_byte: impl FnMut(Option)) { + macro_rules! write_bytes { + ($bytes:expr) => { + for b in $bytes { + write_byte(Some(*b)) + } + }; + } + + // Write GPRs + for reg in self.r.iter() { + write_bytes!(®.to_le_bytes()); + } + + // Write stack pointer register + write_bytes!(&self.sp.to_le_bytes()); + // Write program counter register + write_bytes!(&self.pc.to_le_bytes()); + } + + fn gdb_deserialize(&mut self, mut bytes: &[u8]) -> Result<(), ()> { + // Ensure bytes contains enough data for all 12 registers + if bytes.len() < (12 * 8) { + return Err(()); + } + + let mut next_reg = || { + if bytes.len() < 8 { + Err(()) + } else { + let (next, rest) = bytes.split_at(8); + bytes = rest; + Ok(u64::from_le_bytes(next.try_into().unwrap())) + } + }; + + // Read general purpose register + for reg in self.r.iter_mut() { + *reg = next_reg()? + } + self.sp = next_reg()?; + self.pc = next_reg()?; + + if next_reg().is_ok() { + return Err(()); + } + + Ok(()) + } + } + } + pub mod id { + use core::num::NonZeroUsize; + + use gdbstub::arch::RegId; + + /// BPF register identifier. + #[derive(Debug, Clone, Copy)] + #[non_exhaustive] + pub enum BpfRegId { + /// General purpose registers (R0-R9) + Gpr(u8), + /// Stack Pointer (R10) + Sp, + /// Program Counter (R11) + Pc, + /// Instruction Counter (pseudo register) + InstructionCountRemaining, + } + + impl RegId for BpfRegId { + fn from_raw_id(id: usize) -> Option<(BpfRegId, Option)> { + let reg = match id { + 0..=9 => { + return Some((BpfRegId::Gpr(id as u8), Some(NonZeroUsize::new(8)?))) + } + 10 => BpfRegId::Sp, + 11 => BpfRegId::Pc, + 12 => BpfRegId::InstructionCountRemaining, + _ => return None, + }; + Some((reg, Some(NonZeroUsize::new(8)?))) + } + } + } + } +} diff --git a/rbpf/src/disassembler.rs b/rbpf/src/disassembler.rs new file mode 100644 index 00000000000000..97fabd046a1085 --- /dev/null +++ b/rbpf/src/disassembler.rs @@ -0,0 +1,279 @@ +#![allow(clippy::arithmetic_side_effects)] +// Copyright 2017 6WIND S.A. +// +// Licensed under the Apache License, Version 2.0 or +// the MIT license , at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +//! Functions in this module are used to handle eBPF programs with a higher level representation, +//! for example to disassemble the code into a human-readable format. 
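Before the disassembler internals, one note on the debugger's wire format: `gdb_serialize` and `gdb_deserialize` above exchange the register file as a fixed 12 × 8-byte little-endian block (R0–R9, then SP, then PC). A minimal standalone sketch of that layout, with hypothetical helper names and no `gdbstub` dependency:

```rust
use std::convert::TryInto;

// Mirrors BpfRegs::gdb_serialize: R0..R9, then SP (R10), then PC (R11),
// each written as 8 little-endian bytes.
fn serialize_regs(r: &[u64; 10], sp: u64, pc: u64) -> Vec<u8> {
    let mut out = Vec::with_capacity(12 * 8);
    for reg in r.iter().chain([&sp, &pc]) {
        out.extend_from_slice(&reg.to_le_bytes());
    }
    out
}

// Mirrors BpfRegs::gdb_deserialize: exactly 12 registers, no trailing bytes.
fn deserialize_regs(bytes: &[u8]) -> Result<([u64; 10], u64, u64), ()> {
    if bytes.len() != 12 * 8 {
        return Err(());
    }
    let mut words = bytes
        .chunks_exact(8)
        .map(|c| u64::from_le_bytes(c.try_into().unwrap()));
    let mut r = [0u64; 10];
    for reg in r.iter_mut() {
        *reg = words.next().ok_or(())?;
    }
    Ok((r, words.next().ok_or(())?, words.next().ok_or(())?))
}

fn main() {
    let r = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
    let bytes = serialize_regs(&r, 0x200000000, 0x29);
    assert_eq!(deserialize_regs(&bytes), Ok((r, 0x200000000, 0x29)));
}
```

The pseudo-register for the remaining instruction count (register id 12) is deliberately not part of this block; it is only reachable through the single-register access path above.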
+
+use crate::{
+    ebpf,
+    program::{BuiltinProgram, FunctionRegistry, SBPFVersion},
+    static_analysis::CfgNode,
+    vm::ContextObject,
+};
+use std::collections::BTreeMap;
+
+fn resolve_label(cfg_nodes: &BTreeMap<usize, CfgNode>, pc: usize) -> &str {
+    cfg_nodes
+        .get(&pc)
+        .map(|cfg_node| cfg_node.label.as_str())
+        .unwrap_or("[invalid]")
+}
+
+#[inline]
+fn alu_imm_str(name: &str, insn: &ebpf::Insn) -> String {
+    format!("{} r{}, {}", name, insn.dst, insn.imm)
+}
+
+#[inline]
+fn alu_reg_str(name: &str, insn: &ebpf::Insn) -> String {
+    format!("{} r{}, r{}", name, insn.dst, insn.src)
+}
+
+#[inline]
+fn byteswap_str(name: &str, insn: &ebpf::Insn) -> String {
+    match insn.imm {
+        16 | 32 | 64 => {}
+        _ => println!("[Disassembler] Warning: Invalid imm value for {name} insn"),
+    }
+    format!("{}{} r{}", name, insn.imm, insn.dst)
+}
+
+#[inline]
+fn signed_off_str(value: i16) -> String {
+    if value < 0 {
+        format!("-{:#x}", -value)
+    } else {
+        format!("+{value:#x}")
+    }
+}
+
+#[inline]
+fn ld_st_imm_str(name: &str, insn: &ebpf::Insn) -> String {
+    format!(
+        "{} [r{}{}], {}",
+        name,
+        insn.dst,
+        signed_off_str(insn.off),
+        insn.imm
+    )
+}
+
+#[inline]
+fn ld_reg_str(name: &str, insn: &ebpf::Insn) -> String {
+    format!(
+        "{} r{}, [r{}{}]",
+        name,
+        insn.dst,
+        insn.src,
+        signed_off_str(insn.off)
+    )
+}
+
+#[inline]
+fn st_reg_str(name: &str, insn: &ebpf::Insn) -> String {
+    format!(
+        "{} [r{}{}], r{}",
+        name,
+        insn.dst,
+        signed_off_str(insn.off),
+        insn.src
+    )
+}
+
+#[inline]
+fn jmp_imm_str(name: &str, insn: &ebpf::Insn, cfg_nodes: &BTreeMap<usize, CfgNode>) -> String {
+    let target_pc = (insn.ptr as isize + insn.off as isize + 1) as usize;
+    format!(
+        "{} r{}, {}, {}",
+        name,
+        insn.dst,
+        insn.imm,
+        resolve_label(cfg_nodes, target_pc)
+    )
+}
+
+#[inline]
+fn jmp_reg_str(name: &str, insn: &ebpf::Insn, cfg_nodes: &BTreeMap<usize, CfgNode>) -> String {
+    let target_pc = (insn.ptr as isize + insn.off as isize + 1) as usize;
+    format!(
+        "{} r{}, r{}, {}",
+        name,
+        insn.dst,
+        insn.src,
+        resolve_label(cfg_nodes, target_pc)
+    )
+}
+
+/// Disassemble an eBPF instruction
+#[rustfmt::skip]
+pub fn disassemble_instruction<C: ContextObject>(
+    insn: &ebpf::Insn,
+    cfg_nodes: &BTreeMap<usize, CfgNode>,
+    function_registry: &FunctionRegistry<usize>,
+    loader: &BuiltinProgram<C>,
+    sbpf_version: &SBPFVersion,
+) -> String {
+    let name;
+    let desc;
+    match insn.opc {
+        // BPF_LD class
+        ebpf::LD_DW_IMM   => { name = "lddw"; desc = format!("{} r{:}, {:#x}", name, insn.dst, insn.imm); },
+
+        // BPF_LDX class
+        ebpf::LD_B_REG    => { name = "ldxb"; desc = ld_reg_str(name, insn); },
+        ebpf::LD_H_REG    => { name = "ldxh"; desc = ld_reg_str(name, insn); },
+        ebpf::LD_W_REG    => { name = "ldxw"; desc = ld_reg_str(name, insn); },
+        ebpf::LD_DW_REG   => { name = "ldxdw"; desc = ld_reg_str(name, insn); },
+
+        // BPF_ST class
+        ebpf::ST_B_IMM    => { name = "stb"; desc = ld_st_imm_str(name, insn); },
+        ebpf::ST_H_IMM    => { name = "sth"; desc = ld_st_imm_str(name, insn); },
+        ebpf::ST_W_IMM    => { name = "stw"; desc = ld_st_imm_str(name, insn); },
+        ebpf::ST_DW_IMM   => { name = "stdw"; desc = ld_st_imm_str(name, insn); },
+
+        // BPF_STX class
+        ebpf::ST_B_REG    => { name = "stxb"; desc = st_reg_str(name, insn); },
+        ebpf::ST_H_REG    => { name = "stxh"; desc = st_reg_str(name, insn); },
+        ebpf::ST_W_REG    => { name = "stxw"; desc = st_reg_str(name, insn); },
+        ebpf::ST_DW_REG   => { name = "stxdw"; desc = st_reg_str(name, insn); },
+
+        // BPF_ALU class
+        ebpf::ADD32_IMM   => { name = "add32"; desc = alu_imm_str(name, insn); },
+        ebpf::ADD32_REG   => { name = "add32"; desc = alu_reg_str(name, insn); },
+        ebpf::SUB32_IMM
=> { name = "sub32"; desc = alu_imm_str(name, insn); }, + ebpf::SUB32_REG => { name = "sub32"; desc = alu_reg_str(name, insn); }, + ebpf::MUL32_IMM => { name = "mul32"; desc = alu_imm_str(name, insn); }, + ebpf::MUL32_REG => { name = "mul32"; desc = alu_reg_str(name, insn); }, + ebpf::DIV32_IMM => { name = "div32"; desc = alu_imm_str(name, insn); }, + ebpf::DIV32_REG => { name = "div32"; desc = alu_reg_str(name, insn); }, + ebpf::OR32_IMM => { name = "or32"; desc = alu_imm_str(name, insn); }, + ebpf::OR32_REG => { name = "or32"; desc = alu_reg_str(name, insn); }, + ebpf::AND32_IMM => { name = "and32"; desc = alu_imm_str(name, insn); }, + ebpf::AND32_REG => { name = "and32"; desc = alu_reg_str(name, insn); }, + ebpf::LSH32_IMM => { name = "lsh32"; desc = alu_imm_str(name, insn); }, + ebpf::LSH32_REG => { name = "lsh32"; desc = alu_reg_str(name, insn); }, + ebpf::RSH32_IMM => { name = "rsh32"; desc = alu_imm_str(name, insn); }, + ebpf::RSH32_REG => { name = "rsh32"; desc = alu_reg_str(name, insn); }, + ebpf::NEG32 => { name = "neg32"; desc = format!("{} r{}", name, insn.dst); }, + ebpf::MOD32_IMM => { name = "mod32"; desc = alu_imm_str(name, insn); }, + ebpf::MOD32_REG => { name = "mod32"; desc = alu_reg_str(name, insn); }, + ebpf::XOR32_IMM => { name = "xor32"; desc = alu_imm_str(name, insn); }, + ebpf::XOR32_REG => { name = "xor32"; desc = alu_reg_str(name, insn); }, + ebpf::MOV32_IMM => { name = "mov32"; desc = alu_imm_str(name, insn); }, + ebpf::MOV32_REG => { name = "mov32"; desc = alu_reg_str(name, insn); }, + ebpf::ARSH32_IMM => { name = "arsh32"; desc = alu_imm_str(name, insn); }, + ebpf::ARSH32_REG => { name = "arsh32"; desc = alu_reg_str(name, insn); }, + ebpf::LE => { name = "le"; desc = byteswap_str(name, insn); }, + ebpf::BE => { name = "be"; desc = byteswap_str(name, insn); }, + + // BPF_ALU64 class + ebpf::ADD64_IMM => { name = "add64"; desc = alu_imm_str(name, insn); }, + ebpf::ADD64_REG => { name = "add64"; desc = alu_reg_str(name, insn); }, + ebpf::SUB64_IMM => { name = "sub64"; desc = alu_imm_str(name, insn); }, + ebpf::SUB64_REG => { name = "sub64"; desc = alu_reg_str(name, insn); }, + ebpf::MUL64_IMM => { name = "mul64"; desc = alu_imm_str(name, insn); }, + ebpf::MUL64_REG => { name = "mul64"; desc = alu_reg_str(name, insn); }, + ebpf::DIV64_IMM => { name = "div64"; desc = alu_imm_str(name, insn); }, + ebpf::DIV64_REG => { name = "div64"; desc = alu_reg_str(name, insn); }, + ebpf::OR64_IMM => { name = "or64"; desc = alu_imm_str(name, insn); }, + ebpf::OR64_REG => { name = "or64"; desc = alu_reg_str(name, insn); }, + ebpf::AND64_IMM => { name = "and64"; desc = alu_imm_str(name, insn); }, + ebpf::AND64_REG => { name = "and64"; desc = alu_reg_str(name, insn); }, + ebpf::LSH64_IMM => { name = "lsh64"; desc = alu_imm_str(name, insn); }, + ebpf::LSH64_REG => { name = "lsh64"; desc = alu_reg_str(name, insn); }, + ebpf::RSH64_IMM => { name = "rsh64"; desc = alu_imm_str(name, insn); }, + ebpf::RSH64_REG => { name = "rsh64"; desc = alu_reg_str(name, insn); }, + ebpf::NEG64 => { name = "neg64"; desc = format!("{} r{}", name, insn.dst); }, + ebpf::MOD64_IMM => { name = "mod64"; desc = alu_imm_str(name, insn); }, + ebpf::MOD64_REG => { name = "mod64"; desc = alu_reg_str(name, insn); }, + ebpf::XOR64_IMM => { name = "xor64"; desc = alu_imm_str(name, insn); }, + ebpf::XOR64_REG => { name = "xor64"; desc = alu_reg_str(name, insn); }, + ebpf::MOV64_IMM => { name = "mov64"; desc = alu_imm_str(name, insn); }, + ebpf::MOV64_REG => { name = "mov64"; desc = alu_reg_str(name, insn); }, + 
ebpf::ARSH64_IMM => { name = "arsh64"; desc = alu_imm_str(name, insn); }, + ebpf::ARSH64_REG => { name = "arsh64"; desc = alu_reg_str(name, insn); }, + ebpf::HOR64_IMM => { name = "hor64"; desc = alu_imm_str(name, insn); }, + + // BPF_PQR class + ebpf::LMUL32_IMM => { name = "lmul32"; desc = alu_imm_str(name, insn); }, + ebpf::LMUL32_REG => { name = "lmul32"; desc = alu_reg_str(name, insn); }, + ebpf::LMUL64_IMM => { name = "lmul64"; desc = alu_imm_str(name, insn); }, + ebpf::LMUL64_REG => { name = "lmul64"; desc = alu_reg_str(name, insn); }, + ebpf::UHMUL64_IMM => { name = "uhmul64"; desc = alu_imm_str(name, insn); }, + ebpf::UHMUL64_REG => { name = "uhmul64"; desc = alu_reg_str(name, insn); }, + ebpf::SHMUL64_IMM => { name = "shmul64"; desc = alu_imm_str(name, insn); }, + ebpf::SHMUL64_REG => { name = "shmul64"; desc = alu_reg_str(name, insn); }, + ebpf::UDIV32_IMM => { name = "udiv32"; desc = alu_imm_str(name, insn); }, + ebpf::UDIV32_REG => { name = "udiv32"; desc = alu_reg_str(name, insn); }, + ebpf::UDIV64_IMM => { name = "udiv64"; desc = alu_imm_str(name, insn); }, + ebpf::UDIV64_REG => { name = "udiv64"; desc = alu_reg_str(name, insn); }, + ebpf::UREM32_IMM => { name = "urem32"; desc = alu_imm_str(name, insn); }, + ebpf::UREM32_REG => { name = "urem32"; desc = alu_reg_str(name, insn); }, + ebpf::UREM64_IMM => { name = "urem64"; desc = alu_imm_str(name, insn); }, + ebpf::UREM64_REG => { name = "urem64"; desc = alu_reg_str(name, insn); }, + ebpf::SDIV32_IMM => { name = "sdiv32"; desc = alu_imm_str(name, insn); }, + ebpf::SDIV32_REG => { name = "sdiv32"; desc = alu_reg_str(name, insn); }, + ebpf::SDIV64_IMM => { name = "sdiv64"; desc = alu_imm_str(name, insn); }, + ebpf::SDIV64_REG => { name = "sdiv64"; desc = alu_reg_str(name, insn); }, + ebpf::SREM32_IMM => { name = "srem32"; desc = alu_imm_str(name, insn); }, + ebpf::SREM32_REG => { name = "srem32"; desc = alu_reg_str(name, insn); }, + ebpf::SREM64_IMM => { name = "srem64"; desc = alu_imm_str(name, insn); }, + ebpf::SREM64_REG => { name = "srem64"; desc = alu_reg_str(name, insn); }, + + // BPF_JMP class + ebpf::JA => { + name = "ja"; + let target_pc = (insn.ptr as isize + insn.off as isize + 1) as usize; + desc = format!("{} {}", name, resolve_label(cfg_nodes, target_pc)); + }, + ebpf::JEQ_IMM => { name = "jeq"; desc = jmp_imm_str(name, insn, cfg_nodes); }, + ebpf::JEQ_REG => { name = "jeq"; desc = jmp_reg_str(name, insn, cfg_nodes); }, + ebpf::JGT_IMM => { name = "jgt"; desc = jmp_imm_str(name, insn, cfg_nodes); }, + ebpf::JGT_REG => { name = "jgt"; desc = jmp_reg_str(name, insn, cfg_nodes); }, + ebpf::JGE_IMM => { name = "jge"; desc = jmp_imm_str(name, insn, cfg_nodes); }, + ebpf::JGE_REG => { name = "jge"; desc = jmp_reg_str(name, insn, cfg_nodes); }, + ebpf::JLT_IMM => { name = "jlt"; desc = jmp_imm_str(name, insn, cfg_nodes); }, + ebpf::JLT_REG => { name = "jlt"; desc = jmp_reg_str(name, insn, cfg_nodes); }, + ebpf::JLE_IMM => { name = "jle"; desc = jmp_imm_str(name, insn, cfg_nodes); }, + ebpf::JLE_REG => { name = "jle"; desc = jmp_reg_str(name, insn, cfg_nodes); }, + ebpf::JSET_IMM => { name = "jset"; desc = jmp_imm_str(name, insn, cfg_nodes); }, + ebpf::JSET_REG => { name = "jset"; desc = jmp_reg_str(name, insn, cfg_nodes); }, + ebpf::JNE_IMM => { name = "jne"; desc = jmp_imm_str(name, insn, cfg_nodes); }, + ebpf::JNE_REG => { name = "jne"; desc = jmp_reg_str(name, insn, cfg_nodes); }, + ebpf::JSGT_IMM => { name = "jsgt"; desc = jmp_imm_str(name, insn, cfg_nodes); }, + ebpf::JSGT_REG => { name = "jsgt"; desc = 
jmp_reg_str(name, insn, cfg_nodes); }, + ebpf::JSGE_IMM => { name = "jsge"; desc = jmp_imm_str(name, insn, cfg_nodes); }, + ebpf::JSGE_REG => { name = "jsge"; desc = jmp_reg_str(name, insn, cfg_nodes); }, + ebpf::JSLT_IMM => { name = "jslt"; desc = jmp_imm_str(name, insn, cfg_nodes); }, + ebpf::JSLT_REG => { name = "jslt"; desc = jmp_reg_str(name, insn, cfg_nodes); }, + ebpf::JSLE_IMM => { name = "jsle"; desc = jmp_imm_str(name, insn, cfg_nodes); }, + ebpf::JSLE_REG => { name = "jsle"; desc = jmp_reg_str(name, insn, cfg_nodes); }, + ebpf::CALL_IMM => { + let mut function_name = None; + if sbpf_version.static_syscalls() { + if insn.src != 0 { + function_name = Some(resolve_label(cfg_nodes, insn.imm as usize).to_string()); + } + } else { + function_name = function_registry.lookup_by_key(insn.imm as u32).map(|(function_name, _)| String::from_utf8_lossy(function_name).to_string()); + } + let function_name = if let Some(function_name) = function_name { + name = "call"; + function_name + } else { + name = "syscall"; + loader.get_function_registry().lookup_by_key(insn.imm as u32).map(|(function_name, _)| String::from_utf8_lossy(function_name).to_string()).unwrap_or_else(|| "[invalid]".to_string()) + }; + desc = format!("{name} {function_name}"); + }, + ebpf::CALL_REG => { name = "callx"; desc = format!("{} r{}", name, if sbpf_version.callx_uses_src_reg() { insn.src } else { insn.imm as u8 }); }, + ebpf::EXIT => { name = "exit"; desc = name.to_string(); }, + + _ => { name = "unknown"; desc = format!("{} opcode={:#x}", name, insn.opc); }, + }; + desc +} diff --git a/rbpf/src/ebpf.rs b/rbpf/src/ebpf.rs new file mode 100644 index 00000000000000..9507e8d17c4718 --- /dev/null +++ b/rbpf/src/ebpf.rs @@ -0,0 +1,620 @@ +#![allow(clippy::arithmetic_side_effects)] +// Copyright 2016 6WIND S.A. +// +// Licensed under the Apache License, Version 2.0 or +// the MIT license , at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +//! This module contains all the definitions related to eBPF, and some functions permitting to +//! manipulate eBPF instructions. +//! +//! The number of bytes in an instruction, the maximum number of instructions in a program, and +//! also all operation codes are defined here as constants. +//! +//! The structure for an instruction used by this crate, as well as the function to extract it from +//! a program, is also defined in the module. +//! +//! To learn more about these instructions, see the Linux kernel documentation: +//! , or for a shorter version of +//! the list of the operation codes: + +use byteorder::{ByteOrder, LittleEndian}; +use hash32::{Hash, Hasher, Murmur3Hasher}; +use std::fmt; + +/// Solana BPF version flag +pub const EF_SBPF_V2: u32 = 0x20; +/// Maximum number of instructions in an eBPF program. +pub const PROG_MAX_INSNS: usize = 65_536; +/// Size of an eBPF instructions, in bytes. +pub const INSN_SIZE: usize = 8; +/// Frame pointer register +pub const FRAME_PTR_REG: usize = 10; +/// Stack pointer register +pub const STACK_PTR_REG: usize = 11; +/// First scratch register +pub const FIRST_SCRATCH_REG: usize = 6; +/// Number of scratch registers +pub const SCRATCH_REGS: usize = 4; +/// Alignment of the memory regions in host address space in bytes +pub const HOST_ALIGN: usize = 16; +/// Upper half of a pointer is the region index, lower half the virtual address inside that region. 
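+/// For example, vm_addr `0x2_0000_0010` means region index 2 (the stack, see `MM_STACK_START` below) at offset `0x10` within it.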
+pub const VIRTUAL_ADDRESS_BITS: usize = 32; + +// Memory map regions virtual addresses need to be (1 << VIRTUAL_ADDRESS_BITS) bytes apart. +// Also the region at index 0 should be skipped to catch NULL ptr accesses. + +/// Start of the program bits (text and ro segments) in the memory map +pub const MM_PROGRAM_START: u64 = 0x100000000; +/// Start of the stack in the memory map +pub const MM_STACK_START: u64 = 0x200000000; +/// Start of the heap in the memory map +pub const MM_HEAP_START: u64 = 0x300000000; +/// Start of the input buffers in the memory map +pub const MM_INPUT_START: u64 = 0x400000000; + +// eBPF op codes. +// See also https://www.kernel.org/doc/Documentation/networking/filter.txt + +// Three least significant bits are operation class: +/// BPF operation class: load from immediate. [DEPRECATED] +pub const BPF_LD: u8 = 0x00; +/// BPF operation class: load from register. +pub const BPF_LDX: u8 = 0x01; +/// BPF operation class: store immediate. +pub const BPF_ST: u8 = 0x02; +/// BPF operation class: store value from register. +pub const BPF_STX: u8 = 0x03; +/// BPF operation class: 32 bits arithmetic operation. +pub const BPF_ALU: u8 = 0x04; +/// BPF operation class: jump. +pub const BPF_JMP: u8 = 0x05; +/// BPF operation class: product / quotient / remainder. +pub const BPF_PQR: u8 = 0x06; +/// BPF operation class: 64 bits arithmetic operation. +pub const BPF_ALU64: u8 = 0x07; + +// For load and store instructions: +// +------------+--------+------------+ +// | 3 bits | 2 bits | 3 bits | +// | mode | size | insn class | +// +------------+--------+------------+ +// (MSB) (LSB) + +// Size modifiers: +/// BPF size modifier: word (4 bytes). +pub const BPF_W: u8 = 0x00; +/// BPF size modifier: half-word (2 bytes). +pub const BPF_H: u8 = 0x08; +/// BPF size modifier: byte (1 byte). +pub const BPF_B: u8 = 0x10; +/// BPF size modifier: double word (8 bytes). +pub const BPF_DW: u8 = 0x18; + +// Mode modifiers: +/// BPF mode modifier: immediate value. +pub const BPF_IMM: u8 = 0x00; +/// BPF mode modifier: absolute load. +pub const BPF_ABS: u8 = 0x20; +/// BPF mode modifier: indirect load. +pub const BPF_IND: u8 = 0x40; +/// BPF mode modifier: load from / store to memory. +pub const BPF_MEM: u8 = 0x60; +// [ 0x80 reserved ] +// [ 0xa0 reserved ] +// [ 0xc0 reserved ] + +// For arithmetic (BPF_ALU/BPF_ALU64) and jump (BPF_JMP) instructions: +// +----------------+--------+--------+ +// | 4 bits |1 b.| 3 bits | +// | operation code | src| insn class | +// +----------------+----+------------+ +// (MSB) (LSB) + +// Source modifiers: +/// BPF source operand modifier: 32-bit immediate value. +pub const BPF_K: u8 = 0x00; +/// BPF source operand modifier: `src` register. +pub const BPF_X: u8 = 0x08; + +// Operation codes -- BPF_ALU or BPF_ALU64 classes: +/// BPF ALU/ALU64 operation code: addition. +pub const BPF_ADD: u8 = 0x00; +/// BPF ALU/ALU64 operation code: subtraction. +pub const BPF_SUB: u8 = 0x10; +/// BPF ALU/ALU64 operation code: multiplication. [DEPRECATED] +pub const BPF_MUL: u8 = 0x20; +/// BPF ALU/ALU64 operation code: division. [DEPRECATED] +pub const BPF_DIV: u8 = 0x30; +/// BPF ALU/ALU64 operation code: or. +pub const BPF_OR: u8 = 0x40; +/// BPF ALU/ALU64 operation code: and. +pub const BPF_AND: u8 = 0x50; +/// BPF ALU/ALU64 operation code: left shift. +pub const BPF_LSH: u8 = 0x60; +/// BPF ALU/ALU64 operation code: right shift. +pub const BPF_RSH: u8 = 0x70; +/// BPF ALU/ALU64 operation code: negation. 
[DEPRECATED] +pub const BPF_NEG: u8 = 0x80; +/// BPF ALU/ALU64 operation code: modulus. [DEPRECATED] +pub const BPF_MOD: u8 = 0x90; +/// BPF ALU/ALU64 operation code: exclusive or. +pub const BPF_XOR: u8 = 0xa0; +/// BPF ALU/ALU64 operation code: move. +pub const BPF_MOV: u8 = 0xb0; +/// BPF ALU/ALU64 operation code: sign extending right shift. +pub const BPF_ARSH: u8 = 0xc0; +/// BPF ALU/ALU64 operation code: endianness conversion. +pub const BPF_END: u8 = 0xd0; +/// BPF ALU/ALU64 operation code: high or. +pub const BPF_HOR: u8 = 0xf0; + +// Operation codes -- BPF_PQR class: +// 7 6 5 4 3 2-0 +// 0 Unsigned Multiplication Product Lower Half / Quotient 32 Bit Immediate PQR +// 1 Signed Division Product Upper Half / Remainder 64 Bit Register PQR +/// BPF PQR operation code: unsigned high multiplication. +pub const BPF_UHMUL: u8 = 0x20; +/// BPF PQR operation code: unsigned division quotient. +pub const BPF_UDIV: u8 = 0x40; +/// BPF PQR operation code: unsigned division remainder. +pub const BPF_UREM: u8 = 0x60; +/// BPF PQR operation code: low multiplication. +pub const BPF_LMUL: u8 = 0x80; +/// BPF PQR operation code: signed high multiplication. +pub const BPF_SHMUL: u8 = 0xA0; +/// BPF PQR operation code: signed division quotient. +pub const BPF_SDIV: u8 = 0xC0; +/// BPF PQR operation code: signed division remainder. +pub const BPF_SREM: u8 = 0xE0; + +// Operation codes -- BPF_JMP class: +/// BPF JMP operation code: jump. +pub const BPF_JA: u8 = 0x00; +/// BPF JMP operation code: jump if equal. +pub const BPF_JEQ: u8 = 0x10; +/// BPF JMP operation code: jump if greater than. +pub const BPF_JGT: u8 = 0x20; +/// BPF JMP operation code: jump if greater or equal. +pub const BPF_JGE: u8 = 0x30; +/// BPF JMP operation code: jump if `src` & `reg`. +pub const BPF_JSET: u8 = 0x40; +/// BPF JMP operation code: jump if not equal. +pub const BPF_JNE: u8 = 0x50; +/// BPF JMP operation code: jump if greater than (signed). +pub const BPF_JSGT: u8 = 0x60; +/// BPF JMP operation code: jump if greater or equal (signed). +pub const BPF_JSGE: u8 = 0x70; +/// BPF JMP operation code: syscall function call. +pub const BPF_CALL: u8 = 0x80; +/// BPF JMP operation code: return from program. +pub const BPF_EXIT: u8 = 0x90; +/// BPF JMP operation code: jump if lower than. +pub const BPF_JLT: u8 = 0xa0; +/// BPF JMP operation code: jump if lower or equal. +pub const BPF_JLE: u8 = 0xb0; +/// BPF JMP operation code: jump if lower than (signed). +pub const BPF_JSLT: u8 = 0xc0; +/// BPF JMP operation code: jump if lower or equal (signed). +pub const BPF_JSLE: u8 = 0xd0; + +// Op codes +// (Following operation names are not “official”, but may be proper to rbpf; Linux kernel only +// combines above flags and does not attribute a name per operation.) + +/// BPF opcode: `lddw dst, imm` /// `dst = imm`. [DEPRECATED] +pub const LD_DW_IMM: u8 = BPF_LD | BPF_IMM | BPF_DW; +/// BPF opcode: `ldxb dst, [src + off]` /// `dst = (src + off) as u8`. +pub const LD_B_REG: u8 = BPF_LDX | BPF_MEM | BPF_B; +/// BPF opcode: `ldxh dst, [src + off]` /// `dst = (src + off) as u16`. +pub const LD_H_REG: u8 = BPF_LDX | BPF_MEM | BPF_H; +/// BPF opcode: `ldxw dst, [src + off]` /// `dst = (src + off) as u32`. +pub const LD_W_REG: u8 = BPF_LDX | BPF_MEM | BPF_W; +/// BPF opcode: `ldxdw dst, [src + off]` /// `dst = (src + off) as u64`. +pub const LD_DW_REG: u8 = BPF_LDX | BPF_MEM | BPF_DW; +/// BPF opcode: `stb [dst + off], imm` /// `(dst + offset) as u8 = imm`. 
+pub const ST_B_IMM: u8 = BPF_ST | BPF_MEM | BPF_B; +/// BPF opcode: `sth [dst + off], imm` /// `(dst + offset) as u16 = imm`. +pub const ST_H_IMM: u8 = BPF_ST | BPF_MEM | BPF_H; +/// BPF opcode: `stw [dst + off], imm` /// `(dst + offset) as u32 = imm`. +pub const ST_W_IMM: u8 = BPF_ST | BPF_MEM | BPF_W; +/// BPF opcode: `stdw [dst + off], imm` /// `(dst + offset) as u64 = imm`. +pub const ST_DW_IMM: u8 = BPF_ST | BPF_MEM | BPF_DW; +/// BPF opcode: `stxb [dst + off], src` /// `(dst + offset) as u8 = src`. +pub const ST_B_REG: u8 = BPF_STX | BPF_MEM | BPF_B; +/// BPF opcode: `stxh [dst + off], src` /// `(dst + offset) as u16 = src`. +pub const ST_H_REG: u8 = BPF_STX | BPF_MEM | BPF_H; +/// BPF opcode: `stxw [dst + off], src` /// `(dst + offset) as u32 = src`. +pub const ST_W_REG: u8 = BPF_STX | BPF_MEM | BPF_W; +/// BPF opcode: `stxdw [dst + off], src` /// `(dst + offset) as u64 = src`. +pub const ST_DW_REG: u8 = BPF_STX | BPF_MEM | BPF_DW; + +/// BPF opcode: `add32 dst, imm` /// `dst += imm`. +pub const ADD32_IMM: u8 = BPF_ALU | BPF_K | BPF_ADD; +/// BPF opcode: `add32 dst, src` /// `dst += src`. +pub const ADD32_REG: u8 = BPF_ALU | BPF_X | BPF_ADD; +/// BPF opcode: `sub32 dst, imm` /// `dst = imm - dst`. +pub const SUB32_IMM: u8 = BPF_ALU | BPF_K | BPF_SUB; +/// BPF opcode: `sub32 dst, src` /// `dst -= src`. +pub const SUB32_REG: u8 = BPF_ALU | BPF_X | BPF_SUB; +/// BPF opcode: `mul32 dst, imm` /// `dst *= imm`. +pub const MUL32_IMM: u8 = BPF_ALU | BPF_K | BPF_MUL; +/// BPF opcode: `mul32 dst, src` /// `dst *= src`. +pub const MUL32_REG: u8 = BPF_ALU | BPF_X | BPF_MUL; +/// BPF opcode: `div32 dst, imm` /// `dst /= imm`. +pub const DIV32_IMM: u8 = BPF_ALU | BPF_K | BPF_DIV; +/// BPF opcode: `div32 dst, src` /// `dst /= src`. +pub const DIV32_REG: u8 = BPF_ALU | BPF_X | BPF_DIV; +/// BPF opcode: `or32 dst, imm` /// `dst |= imm`. +pub const OR32_IMM: u8 = BPF_ALU | BPF_K | BPF_OR; +/// BPF opcode: `or32 dst, src` /// `dst |= src`. +pub const OR32_REG: u8 = BPF_ALU | BPF_X | BPF_OR; +/// BPF opcode: `and32 dst, imm` /// `dst &= imm`. +pub const AND32_IMM: u8 = BPF_ALU | BPF_K | BPF_AND; +/// BPF opcode: `and32 dst, src` /// `dst &= src`. +pub const AND32_REG: u8 = BPF_ALU | BPF_X | BPF_AND; +/// BPF opcode: `lsh32 dst, imm` /// `dst <<= imm`. +pub const LSH32_IMM: u8 = BPF_ALU | BPF_K | BPF_LSH; +/// BPF opcode: `lsh32 dst, src` /// `dst <<= src`. +pub const LSH32_REG: u8 = BPF_ALU | BPF_X | BPF_LSH; +/// BPF opcode: `rsh32 dst, imm` /// `dst >>= imm`. +pub const RSH32_IMM: u8 = BPF_ALU | BPF_K | BPF_RSH; +/// BPF opcode: `rsh32 dst, src` /// `dst >>= src`. +pub const RSH32_REG: u8 = BPF_ALU | BPF_X | BPF_RSH; +/// BPF opcode: `neg32 dst` /// `dst = -dst`. +pub const NEG32: u8 = BPF_ALU | BPF_NEG; +/// BPF opcode: `mod32 dst, imm` /// `dst %= imm`. +pub const MOD32_IMM: u8 = BPF_ALU | BPF_K | BPF_MOD; +/// BPF opcode: `mod32 dst, src` /// `dst %= src`. +pub const MOD32_REG: u8 = BPF_ALU | BPF_X | BPF_MOD; +/// BPF opcode: `xor32 dst, imm` /// `dst ^= imm`. +pub const XOR32_IMM: u8 = BPF_ALU | BPF_K | BPF_XOR; +/// BPF opcode: `xor32 dst, src` /// `dst ^= src`. +pub const XOR32_REG: u8 = BPF_ALU | BPF_X | BPF_XOR; +/// BPF opcode: `mov32 dst, imm` /// `dst = imm`. +pub const MOV32_IMM: u8 = BPF_ALU | BPF_K | BPF_MOV; +/// BPF opcode: `mov32 dst, src` /// `dst = src`. +pub const MOV32_REG: u8 = BPF_ALU | BPF_X | BPF_MOV; +/// BPF opcode: `arsh32 dst, imm` /// `dst >>= imm (arithmetic)`. 
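+/// (For example, `arsh32` of `0x8000_0000` by 4 yields `0xf800_0000`; the sign bit is replicated, unlike `rsh32`.)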
+pub const ARSH32_IMM: u8 = BPF_ALU | BPF_K | BPF_ARSH;
+/// BPF opcode: `arsh32 dst, src` /// `dst >>= src (arithmetic)`.
+pub const ARSH32_REG: u8 = BPF_ALU | BPF_X | BPF_ARSH;
+
+/// BPF opcode: `lmul32 dst, imm` /// `dst = (dst * imm) as u32`.
+pub const LMUL32_IMM: u8 = BPF_PQR | BPF_K | BPF_LMUL;
+/// BPF opcode: `lmul32 dst, src` /// `dst = (dst * src) as u32`.
+pub const LMUL32_REG: u8 = BPF_PQR | BPF_X | BPF_LMUL;
+/// BPF opcode: `uhmul32 dst, imm` /// `dst = (dst * imm) as u64`.
+// pub const UHMUL32_IMM: u8 = BPF_PQR | BPF_K | BPF_UHMUL;
+/// BPF opcode: `uhmul32 dst, src` /// `dst = (dst * src) as u64`.
+// pub const UHMUL32_REG: u8 = BPF_PQR | BPF_X | BPF_UHMUL;
+/// BPF opcode: `udiv32 dst, imm` /// `dst /= imm`.
+pub const UDIV32_IMM: u8 = BPF_PQR | BPF_K | BPF_UDIV;
+/// BPF opcode: `udiv32 dst, src` /// `dst /= src`.
+pub const UDIV32_REG: u8 = BPF_PQR | BPF_X | BPF_UDIV;
+/// BPF opcode: `urem32 dst, imm` /// `dst %= imm`.
+pub const UREM32_IMM: u8 = BPF_PQR | BPF_K | BPF_UREM;
+/// BPF opcode: `urem32 dst, src` /// `dst %= src`.
+pub const UREM32_REG: u8 = BPF_PQR | BPF_X | BPF_UREM;
+/// BPF opcode: `shmul32 dst, imm` /// `dst = (dst * imm) as i64`.
+// pub const SHMUL32_IMM: u8 = BPF_PQR | BPF_K | BPF_SHMUL;
+/// BPF opcode: `shmul32 dst, src` /// `dst = (dst * src) as i64`.
+// pub const SHMUL32_REG: u8 = BPF_PQR | BPF_X | BPF_SHMUL;
+/// BPF opcode: `sdiv32 dst, imm` /// `dst /= imm`.
+pub const SDIV32_IMM: u8 = BPF_PQR | BPF_K | BPF_SDIV;
+/// BPF opcode: `sdiv32 dst, src` /// `dst /= src`.
+pub const SDIV32_REG: u8 = BPF_PQR | BPF_X | BPF_SDIV;
+/// BPF opcode: `srem32 dst, imm` /// `dst %= imm`.
+pub const SREM32_IMM: u8 = BPF_PQR | BPF_K | BPF_SREM;
+/// BPF opcode: `srem32 dst, src` /// `dst %= src`.
+pub const SREM32_REG: u8 = BPF_PQR | BPF_X | BPF_SREM;
+
+/// BPF opcode: `le dst` /// `dst = htole(dst), with imm in {16, 32, 64}`.
+pub const LE: u8 = BPF_ALU | BPF_K | BPF_END;
+/// BPF opcode: `be dst` /// `dst = htobe(dst), with imm in {16, 32, 64}`.
+pub const BE: u8 = BPF_ALU | BPF_X | BPF_END;
+
+/// BPF opcode: `add64 dst, imm` /// `dst += imm`.
+pub const ADD64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_ADD;
+/// BPF opcode: `add64 dst, src` /// `dst += src`.
+pub const ADD64_REG: u8 = BPF_ALU64 | BPF_X | BPF_ADD;
+/// BPF opcode: `sub64 dst, imm` /// `dst -= imm`.
+pub const SUB64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_SUB;
+/// BPF opcode: `sub64 dst, src` /// `dst -= src`.
+pub const SUB64_REG: u8 = BPF_ALU64 | BPF_X | BPF_SUB;
+/// BPF opcode: `mul64 dst, imm` /// `dst *= imm`.
+pub const MUL64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_MUL;
+/// BPF opcode: `mul64 dst, src` /// `dst *= src`.
+pub const MUL64_REG: u8 = BPF_ALU64 | BPF_X | BPF_MUL;
+/// BPF opcode: `div64 dst, imm` /// `dst /= imm`.
+pub const DIV64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_DIV;
+/// BPF opcode: `div64 dst, src` /// `dst /= src`.
+pub const DIV64_REG: u8 = BPF_ALU64 | BPF_X | BPF_DIV;
+/// BPF opcode: `or64 dst, imm` /// `dst |= imm`.
+pub const OR64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_OR;
+/// BPF opcode: `or64 dst, src` /// `dst |= src`.
+pub const OR64_REG: u8 = BPF_ALU64 | BPF_X | BPF_OR;
+/// BPF opcode: `and64 dst, imm` /// `dst &= imm`.
+pub const AND64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_AND;
+/// BPF opcode: `and64 dst, src` /// `dst &= src`.
+pub const AND64_REG: u8 = BPF_ALU64 | BPF_X | BPF_AND;
+/// BPF opcode: `lsh64 dst, imm` /// `dst <<= imm`.
+pub const LSH64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_LSH;
+/// BPF opcode: `lsh64 dst, src` /// `dst <<= src`.
+pub const LSH64_REG: u8 = BPF_ALU64 | BPF_X | BPF_LSH; +/// BPF opcode: `rsh64 dst, imm` /// `dst >>= imm`. +pub const RSH64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_RSH; +/// BPF opcode: `rsh64 dst, src` /// `dst >>= src`. +pub const RSH64_REG: u8 = BPF_ALU64 | BPF_X | BPF_RSH; +/// BPF opcode: `neg64 dst, imm` /// `dst = -dst`. +pub const NEG64: u8 = BPF_ALU64 | BPF_NEG; +/// BPF opcode: `mod64 dst, imm` /// `dst %= imm`. +pub const MOD64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_MOD; +/// BPF opcode: `mod64 dst, src` /// `dst %= src`. +pub const MOD64_REG: u8 = BPF_ALU64 | BPF_X | BPF_MOD; +/// BPF opcode: `xor64 dst, imm` /// `dst ^= imm`. +pub const XOR64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_XOR; +/// BPF opcode: `xor64 dst, src` /// `dst ^= src`. +pub const XOR64_REG: u8 = BPF_ALU64 | BPF_X | BPF_XOR; +/// BPF opcode: `mov64 dst, imm` /// `dst = imm`. +pub const MOV64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_MOV; +/// BPF opcode: `mov64 dst, src` /// `dst = src`. +pub const MOV64_REG: u8 = BPF_ALU64 | BPF_X | BPF_MOV; +/// BPF opcode: `arsh64 dst, imm` /// `dst >>= imm (arithmetic)`. +pub const ARSH64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_ARSH; +/// BPF opcode: `arsh64 dst, src` /// `dst >>= src (arithmetic)`. +pub const ARSH64_REG: u8 = BPF_ALU64 | BPF_X | BPF_ARSH; +/// BPF opcode: `hor64 dst, imm` /// `dst |= imm << 32`. +pub const HOR64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_HOR; + +/// BPF opcode: `lmul64 dst, imm` /// `dst = (dst * imm) as u64`. +pub const LMUL64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_LMUL; +/// BPF opcode: `lmul64 dst, src` /// `dst = (dst * src) as u64`. +pub const LMUL64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_LMUL; +/// BPF opcode: `uhmul64 dst, imm` /// `dst = (dst * imm) >> 64`. +pub const UHMUL64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_UHMUL; +/// BPF opcode: `uhmul64 dst, src` /// `dst = (dst * src) >> 64`. +pub const UHMUL64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_UHMUL; +/// BPF opcode: `udiv64 dst, imm` /// `dst /= imm`. +pub const UDIV64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_UDIV; +/// BPF opcode: `udiv64 dst, src` /// `dst /= src`. +pub const UDIV64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_UDIV; +/// BPF opcode: `urem64 dst, imm` /// `dst %= imm`. +pub const UREM64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_UREM; +/// BPF opcode: `urem64 dst, src` /// `dst %= src`. +pub const UREM64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_UREM; +/// BPF opcode: `shmul64 dst, imm` /// `dst = (dst * imm) >> 64`. +pub const SHMUL64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_SHMUL; +/// BPF opcode: `shmul64 dst, src` /// `dst = (dst * src) >> 64`. +pub const SHMUL64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_SHMUL; +/// BPF opcode: `sdiv64 dst, imm` /// `dst /= imm`. +pub const SDIV64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_SDIV; +/// BPF opcode: `sdiv64 dst, src` /// `dst /= src`. +pub const SDIV64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_SDIV; +/// BPF opcode: `srem64 dst, imm` /// `dst %= imm`. +pub const SREM64_IMM: u8 = BPF_PQR | BPF_B | BPF_K | BPF_SREM; +/// BPF opcode: `srem64 dst, src` /// `dst %= src`. +pub const SREM64_REG: u8 = BPF_PQR | BPF_B | BPF_X | BPF_SREM; + +/// BPF opcode: `ja +off` /// `PC += off`. +pub const JA: u8 = BPF_JMP | BPF_JA; +/// BPF opcode: `jeq dst, imm, +off` /// `PC += off if dst == imm`. +pub const JEQ_IMM: u8 = BPF_JMP | BPF_K | BPF_JEQ; +/// BPF opcode: `jeq dst, src, +off` /// `PC += off if dst == src`. +pub const JEQ_REG: u8 = BPF_JMP | BPF_X | BPF_JEQ; +/// BPF opcode: `jgt dst, imm, +off` /// `PC += off if dst > imm`. 
+pub const JGT_IMM: u8 = BPF_JMP | BPF_K | BPF_JGT; +/// BPF opcode: `jgt dst, src, +off` /// `PC += off if dst > src`. +pub const JGT_REG: u8 = BPF_JMP | BPF_X | BPF_JGT; +/// BPF opcode: `jge dst, imm, +off` /// `PC += off if dst >= imm`. +pub const JGE_IMM: u8 = BPF_JMP | BPF_K | BPF_JGE; +/// BPF opcode: `jge dst, src, +off` /// `PC += off if dst >= src`. +pub const JGE_REG: u8 = BPF_JMP | BPF_X | BPF_JGE; +/// BPF opcode: `jlt dst, imm, +off` /// `PC += off if dst < imm`. +pub const JLT_IMM: u8 = BPF_JMP | BPF_K | BPF_JLT; +/// BPF opcode: `jlt dst, src, +off` /// `PC += off if dst < src`. +pub const JLT_REG: u8 = BPF_JMP | BPF_X | BPF_JLT; +/// BPF opcode: `jle dst, imm, +off` /// `PC += off if dst <= imm`. +pub const JLE_IMM: u8 = BPF_JMP | BPF_K | BPF_JLE; +/// BPF opcode: `jle dst, src, +off` /// `PC += off if dst <= src`. +pub const JLE_REG: u8 = BPF_JMP | BPF_X | BPF_JLE; +/// BPF opcode: `jset dst, imm, +off` /// `PC += off if dst & imm`. +pub const JSET_IMM: u8 = BPF_JMP | BPF_K | BPF_JSET; +/// BPF opcode: `jset dst, src, +off` /// `PC += off if dst & src`. +pub const JSET_REG: u8 = BPF_JMP | BPF_X | BPF_JSET; +/// BPF opcode: `jne dst, imm, +off` /// `PC += off if dst != imm`. +pub const JNE_IMM: u8 = BPF_JMP | BPF_K | BPF_JNE; +/// BPF opcode: `jne dst, src, +off` /// `PC += off if dst != src`. +pub const JNE_REG: u8 = BPF_JMP | BPF_X | BPF_JNE; +/// BPF opcode: `jsgt dst, imm, +off` /// `PC += off if dst > imm (signed)`. +pub const JSGT_IMM: u8 = BPF_JMP | BPF_K | BPF_JSGT; +/// BPF opcode: `jsgt dst, src, +off` /// `PC += off if dst > src (signed)`. +pub const JSGT_REG: u8 = BPF_JMP | BPF_X | BPF_JSGT; +/// BPF opcode: `jsge dst, imm, +off` /// `PC += off if dst >= imm (signed)`. +pub const JSGE_IMM: u8 = BPF_JMP | BPF_K | BPF_JSGE; +/// BPF opcode: `jsge dst, src, +off` /// `PC += off if dst >= src (signed)`. +pub const JSGE_REG: u8 = BPF_JMP | BPF_X | BPF_JSGE; +/// BPF opcode: `jslt dst, imm, +off` /// `PC += off if dst < imm (signed)`. +pub const JSLT_IMM: u8 = BPF_JMP | BPF_K | BPF_JSLT; +/// BPF opcode: `jslt dst, src, +off` /// `PC += off if dst < src (signed)`. +pub const JSLT_REG: u8 = BPF_JMP | BPF_X | BPF_JSLT; +/// BPF opcode: `jsle dst, imm, +off` /// `PC += off if dst <= imm (signed)`. +pub const JSLE_IMM: u8 = BPF_JMP | BPF_K | BPF_JSLE; +/// BPF opcode: `jsle dst, src, +off` /// `PC += off if dst <= src (signed)`. +pub const JSLE_REG: u8 = BPF_JMP | BPF_X | BPF_JSLE; + +/// BPF opcode: `call imm` /// syscall function call to syscall with key `imm`. +pub const CALL_IMM: u8 = BPF_JMP | BPF_CALL; +/// BPF opcode: tail call. +pub const CALL_REG: u8 = BPF_JMP | BPF_X | BPF_CALL; +/// BPF opcode: `exit` /// `return r0`. +pub const EXIT: u8 = BPF_JMP | BPF_EXIT; + +// Used in JIT +/// Mask to extract the operation class from an operation code. +pub const BPF_CLS_MASK: u8 = 0x07; +/// Mask to extract the arithmetic operation code from an instruction operation code. +pub const BPF_ALU_OP_MASK: u8 = 0xf0; + +/// An eBPF instruction. +/// +/// See for the Linux kernel +/// documentation about eBPF, or for a +/// more concise version. +#[derive(PartialEq, Eq, Clone, Default)] +pub struct Insn { + /// Instruction pointer. + pub ptr: usize, + /// Operation code. + pub opc: u8, + /// Destination register operand. + pub dst: u8, + /// Source register operand. + pub src: u8, + /// Offset operand. + pub off: i16, + /// Immediate value operand. 
+ pub imm: i64, +} + +impl fmt::Debug for Insn { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "Insn {{ ptr: 0x{:08x?}, opc: 0x{:02x?}, dst: {}, src: {}, off: 0x{:04x?}, imm: 0x{:08x?} }}", + self.ptr, self.opc, self.dst, self.src, self.off, self.imm + ) + } +} + +impl Insn { + /// Turn an `Insn` back into an array of bytes. + /// + /// # Examples + /// + /// ``` + /// use solana_rbpf::ebpf; + /// + /// let prog: &[u8] = &[ + /// 0xb7, 0x12, 0x56, 0x34, 0xde, 0xbc, 0x9a, 0x78, + /// ]; + /// let insn = ebpf::Insn { + /// ptr: 0x00, + /// opc: 0xb7, + /// dst: 2, + /// src: 1, + /// off: 0x3456, + /// imm: 0x789abcde + /// }; + /// assert_eq!(insn.to_array(), prog); + /// ``` + pub fn to_array(&self) -> [u8; INSN_SIZE] { + [ + self.opc, + self.src.wrapping_shl(4) | self.dst, + (self.off & 0xff) as u8, + self.off.wrapping_shr(8) as u8, + (self.imm & 0xff) as u8, + (self.imm & 0xff_00).wrapping_shr(8) as u8, + (self.imm as u32 & 0xff_00_00).wrapping_shr(16) as u8, + (self.imm as u32 & 0xff_00_00_00).wrapping_shr(24) as u8, + ] + } + + /// Turn an `Insn` into an vector of bytes. + /// + /// # Examples + /// + /// ``` + /// use solana_rbpf::ebpf; + /// + /// let prog: Vec = vec![ + /// 0xb7, 0x12, 0x56, 0x34, 0xde, 0xbc, 0x9a, 0x78, + /// ]; + /// let insn = ebpf::Insn { + /// ptr: 0x00, + /// opc: 0xb7, + /// dst: 2, + /// src: 1, + /// off: 0x3456, + /// imm: 0x789abcde + /// }; + /// assert_eq!(insn.to_vec(), prog); + /// ``` + pub fn to_vec(&self) -> Vec { + self.to_array().to_vec() + } +} + +/// Get the instruction at `idx` of an eBPF program. `idx` is the index (number) of the +/// instruction (not a byte offset). The first instruction has index 0. +/// +/// # Panics +/// +/// Panics if it is not possible to get the instruction (if idx is too high, or last instruction is +/// incomplete). +/// +/// # Examples +/// +/// ``` +/// use solana_rbpf::ebpf; +/// +/// let prog = &[ +/// 0xb7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +/// ]; +/// let insn = ebpf::get_insn(prog, 1); +/// assert_eq!(insn.opc, 0x95); +/// ``` +/// +/// The example below will panic, since the last instruction is not complete and cannot be loaded. +/// +/// ```rust,should_panic +/// use solana_rbpf::ebpf; +/// +/// let prog = &[ +/// 0xb7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/// 0x95, 0x00, 0x00, 0x00, 0x00, 0x00 // two bytes missing +/// ]; +/// let insn = ebpf::get_insn(prog, 1); +/// ``` +pub fn get_insn(prog: &[u8], pc: usize) -> Insn { + // This guard should not be needed in most cases, since the verifier already checks the program + // size, and indexes should be fine in the interpreter/JIT. But this function is publicly + // available and user can call it with any `pc`, so we have to check anyway. 
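+    // For example: a 16-byte program holds two instructions, so pc = 1 needs
+    // (1 + 1) * INSN_SIZE = 16 bytes and passes, while pc = 2 would need 24 and panics.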
+ debug_assert!( + (pc + 1) * INSN_SIZE <= prog.len(), + "cannot reach instruction at index {:?} in program containing {:?} bytes", + pc, + prog.len() + ); + get_insn_unchecked(prog, pc) +} +/// Same as `get_insn` except not checked +pub fn get_insn_unchecked(prog: &[u8], pc: usize) -> Insn { + Insn { + ptr: pc, + opc: prog[INSN_SIZE * pc], + dst: prog[INSN_SIZE * pc + 1] & 0x0f, + src: (prog[INSN_SIZE * pc + 1] & 0xf0) >> 4, + off: LittleEndian::read_i16(&prog[(INSN_SIZE * pc + 2)..]), + imm: LittleEndian::read_i32(&prog[(INSN_SIZE * pc + 4)..]) as i64, + } +} + +/// Merge the two halves of a LD_DW_IMM instruction +pub fn augment_lddw_unchecked(prog: &[u8], insn: &mut Insn) { + let more_significant_half = LittleEndian::read_i32(&prog[((insn.ptr + 1) * INSN_SIZE + 4)..]); + insn.imm = ((insn.imm as u64 & 0xffffffff) | ((more_significant_half as u64) << 32)) as i64; +} + +/// Hash a symbol name +/// +/// This function is used by both the relocator and the VM to translate symbol names +/// into a 32 bit id used to identify a syscall function. The 32 bit id is used in the +/// eBPF `call` instruction's imm field. +pub fn hash_symbol_name(name: &[u8]) -> u32 { + let mut hasher = Murmur3Hasher::default(); + Hash::hash_slice(name, &mut hasher); + hasher.finish() +} diff --git a/rbpf/src/elf.rs b/rbpf/src/elf.rs new file mode 100644 index 00000000000000..0442174eedfff9 --- /dev/null +++ b/rbpf/src/elf.rs @@ -0,0 +1,1976 @@ +//! This module relocates a BPF ELF + +// Note: Typically ELF shared objects are loaded using the program headers and +// not the section headers. Since we are leveraging the elfkit crate its much +// easier to use the section headers. There are cases (reduced size, obfuscation) +// where the section headers may be removed from the ELF. If that happens then +// this loader will need to be re-written to use the program headers instead. 
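The instruction accessors just above fix the slot layout: opcode byte, register nibbles, little-endian 16-bit offset, little-endian 32-bit immediate. A minimal sketch of a decoder for one slot, assuming only `std` (the `decode` helper is hypothetical; `get_insn_unchecked` above is the real thing):

```rust
// Decodes one 8-byte eBPF slot: opc | src:dst | off (LE i16) | imm (LE i32).
fn decode(slot: [u8; 8]) -> (u8, u8, u8, i16, i64) {
    let opc = slot[0];
    let dst = slot[1] & 0x0f; // low nibble: destination register
    let src = (slot[1] & 0xf0) >> 4; // high nibble: source register
    let off = i16::from_le_bytes([slot[2], slot[3]]);
    let imm = i32::from_le_bytes([slot[4], slot[5], slot[6], slot[7]]) as i64;
    (opc, dst, src, off, imm)
}

fn main() {
    // The sample slot from the Insn::to_array doc-test above.
    let slot = [0xb7, 0x12, 0x56, 0x34, 0xde, 0xbc, 0x9a, 0x78];
    assert_eq!(decode(slot), (0xb7, 2, 1, 0x3456, 0x789abcde));
}
```

`lddw` is the one exception to the single-slot rule: it spans two slots, and `augment_lddw_unchecked` above splices the second slot's immediate into the upper 32 bits.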
+
+use crate::{
+    aligned_memory::{is_memory_aligned, AlignedMemory},
+    ebpf::{self, EF_SBPF_V2, HOST_ALIGN, INSN_SIZE},
+    elf_parser::{
+        consts::{
+            ELFCLASS64, ELFDATA2LSB, ELFOSABI_NONE, EM_BPF, EM_SBPF, ET_DYN, R_X86_64_32,
+            R_X86_64_64, R_X86_64_NONE, R_X86_64_RELATIVE,
+        },
+        types::Elf64Word,
+    },
+    elf_parser_glue::{
+        ElfParser, ElfProgramHeader, ElfRelocation, ElfSectionHeader, ElfSymbol, GoblinParser,
+        NewParser,
+    },
+    error::EbpfError,
+    memory_region::MemoryRegion,
+    program::{BuiltinProgram, FunctionRegistry, SBPFVersion},
+    verifier::Verifier,
+    vm::{Config, ContextObject},
+};
+
+#[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
+use crate::jit::{JitCompiler, JitProgram};
+use byteorder::{ByteOrder, LittleEndian};
+use std::{collections::BTreeMap, fmt::Debug, mem, ops::Range, str, sync::Arc};
+
+/// Error definitions
+#[derive(Debug, thiserror::Error, PartialEq, Eq)]
+pub enum ElfError {
+    /// Failed to parse ELF file
+    #[error("Failed to parse ELF file: {0}")]
+    FailedToParse(String),
+    /// Entrypoint out of bounds
+    #[error("Entrypoint out of bounds")]
+    EntrypointOutOfBounds,
+    /// Invalid entrypoint
+    #[error("Invalid entrypoint")]
+    InvalidEntrypoint,
+    /// Failed to get section
+    #[error("Failed to get section {0}")]
+    FailedToGetSection(String),
+    /// Unresolved symbol
+    #[error("Unresolved symbol ({0}) at instruction #{1:?} (ELF file offset {2:#x})")]
+    UnresolvedSymbol(String, usize, usize),
+    /// Section not found
+    #[error("Section not found: {0}")]
+    SectionNotFound(String),
+    /// Relative jump out of bounds
+    #[error("Relative jump out of bounds at instruction #{0}")]
+    RelativeJumpOutOfBounds(usize),
+    /// Symbol hash collision
+    #[error("Symbol hash collision {0:#x}")]
+    SymbolHashCollision(u32),
+    /// Incompatible ELF: wrong endianness
+    #[error("Incompatible ELF: wrong endianness")]
+    WrongEndianess,
+    /// Incompatible ELF: wrong ABI
+    #[error("Incompatible ELF: wrong ABI")]
+    WrongAbi,
+    /// Incompatible ELF: wrong machine
+    #[error("Incompatible ELF: wrong machine")]
+    WrongMachine,
+    /// Incompatible ELF: wrong class
+    #[error("Incompatible ELF: wrong class")]
+    WrongClass,
+    /// Not one text section
+    #[error("Multiple or no text sections, consider removing llc option: -function-sections")]
+    NotOneTextSection,
+    /// Read-write data not supported
+    #[error("Found writable section ({0}) in ELF, read-write data not supported")]
+    WritableSectionNotSupported(String),
+    /// Relocation failed, no loadable section contains virtual address
+    #[error("Relocation failed, no loadable section contains virtual address {0:#x}")]
+    AddressOutsideLoadableSection(u64),
+    /// Relocation failed, invalid referenced virtual address
+    #[error("Relocation failed, invalid referenced virtual address {0:#x}")]
+    InvalidVirtualAddress(u64),
+    /// Relocation failed, unknown type
+    #[error("Relocation failed, unknown type {0:?}")]
+    UnknownRelocation(u32),
+    /// Failed to read relocation info
+    #[error("Failed to read relocation info")]
+    FailedToReadRelocationInfo,
+    /// Incompatible ELF: wrong type
+    #[error("Incompatible ELF: wrong type")]
+    WrongType,
+    /// Unknown symbol
+    #[error("Unknown symbol with index {0}")]
+    UnknownSymbol(usize),
+    /// Offset or value is out of bounds
+    #[error("Offset or value is out of bounds")]
+    ValueOutOfBounds,
+    /// Detected sbpf_version required by the executable which is not enabled
+    #[error("Detected sbpf_version required by the executable which is not enabled")]
+    UnsupportedSBPFVersion,
+    /// Invalid
program header + #[error("Invalid ELF program header")] + InvalidProgramHeader, +} + +// For more information on the BPF instruction set: +// https://github.com/iovisor/bpf-docs/blob/master/eBPF.md + +// msb lsb +// +------------------------+----------------+----+----+--------+ +// |immediate |offset |src |dst |opcode | +// +------------------------+----------------+----+----+--------+ + +// From least significant to most significant bit: +// 8 bit opcode +// 4 bit destination register (dst) +// 4 bit source register (src) +// 16 bit offset +// 32 bit immediate (imm) + +/// Byte offset of the immediate field in the instruction +const BYTE_OFFSET_IMMEDIATE: usize = 4; +/// Byte length of the immediate field +const BYTE_LENGTH_IMMEDIATE: usize = 4; + +/// BPF relocation types. +#[allow(non_camel_case_types)] +#[derive(Debug, PartialEq, Copy, Clone)] +enum BpfRelocationType { + /// No relocation, placeholder + R_Bpf_None = 0, + /// R_BPF_64_64 relocation type is used for ld_imm64 instruction. + /// The actual to-be-relocated data (0 or section offset) is + /// stored at r_offset + 4 and the read/write data bitsize is 32 + /// (4 bytes). The relocation can be resolved with the symbol + /// value plus implicit addend. + R_Bpf_64_64 = 1, + /// 64 bit relocation of a ldxdw instruction. The ldxdw + /// instruction occupies two instruction slots. The 64-bit address + /// to load from is split into the 32-bit imm field of each + /// slot. The first slot's pre-relocation imm field contains the + /// virtual address (typically same as the file offset) of the + /// location to load. Relocation involves calculating the + /// post-load 64-bit physical address referenced by the imm field + /// and writing that physical address back into the imm fields of + /// the ldxdw instruction. + R_Bpf_64_Relative = 8, + /// Relocation of a call instruction. The existing imm field + /// contains either an offset of the instruction to jump to (think + /// local function call) or a special value of "-1". If -1 the + /// symbol must be looked up in the symbol table. The relocation + /// entry contains the symbol number to call. In order to support + /// both local jumps and calling external symbols a 32-bit hash is + /// computed and stored in the the call instruction's 32-bit imm + /// field. The hash is used later to look up the 64-bit address + /// to jump to. In the case of a local jump the hash is + /// calculated using the current program counter and in the case + /// of a symbol the hash is calculated using the name of the + /// symbol. + R_Bpf_64_32 = 10, +} +impl BpfRelocationType { + fn from_x86_relocation_type(from: u32) -> Option { + match from { + R_X86_64_NONE => Some(BpfRelocationType::R_Bpf_None), + R_X86_64_64 => Some(BpfRelocationType::R_Bpf_64_64), + R_X86_64_RELATIVE => Some(BpfRelocationType::R_Bpf_64_Relative), + R_X86_64_32 => Some(BpfRelocationType::R_Bpf_64_32), + _ => None, + } + } +} + +#[derive(Debug, PartialEq)] +struct SectionInfo { + name: String, + vaddr: u64, + offset_range: Range, +} +impl SectionInfo { + fn mem_size(&self) -> usize { + mem::size_of::().saturating_add(self.name.capacity()) + } +} + +#[derive(Debug, PartialEq)] +pub(crate) enum Section { + /// Owned section data. + /// + /// The first field is the offset of the section from MM_PROGRAM_START. The + /// second field is the actual section data. + Owned(usize, Vec), + /// Borrowed section data. + /// + /// The first field is the offset of the section from MM_PROGRAM_START. 
The + /// second field an be used to index the input ELF buffer to retrieve the + /// section data. + Borrowed(usize, Range), +} + +/// Elf loader/relocator +#[derive(Debug, PartialEq)] +pub struct Executable { + /// Loaded and executable elf + elf_bytes: AlignedMemory<{ HOST_ALIGN }>, + /// Required SBPF capabilities + sbpf_version: SBPFVersion, + /// Read-only section + ro_section: Section, + /// Text section info + text_section_info: SectionInfo, + /// Address of the entry point + entry_pc: usize, + /// Call resolution map (hash, pc, name) + function_registry: FunctionRegistry, + /// Loader built-in program + loader: Arc>, + /// Compiled program and argument + #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] + compiled_program: Option, +} + +impl Executable { + /// Get the configuration settings + pub fn get_config(&self) -> &Config { + self.loader.get_config() + } + + /// Get the executable sbpf_version + pub fn get_sbpf_version(&self) -> &SBPFVersion { + &self.sbpf_version + } + + /// Get the .text section virtual address and bytes + pub fn get_text_bytes(&self) -> (u64, &[u8]) { + let (ro_offset, ro_section) = match &self.ro_section { + Section::Owned(offset, data) => (*offset, data.as_slice()), + Section::Borrowed(offset, byte_range) => { + (*offset, &self.elf_bytes.as_slice()[byte_range.clone()]) + } + }; + + let offset = self + .text_section_info + .vaddr + .saturating_sub(ebpf::MM_PROGRAM_START) + .saturating_sub(ro_offset as u64) as usize; + ( + self.text_section_info.vaddr, + &ro_section[offset..offset.saturating_add(self.text_section_info.offset_range.len())], + ) + } + + /// Get the concatenated read-only sections (including the text section) + pub fn get_ro_section(&self) -> &[u8] { + match &self.ro_section { + Section::Owned(_offset, data) => data.as_slice(), + Section::Borrowed(_offset, byte_range) => { + &self.elf_bytes.as_slice()[byte_range.clone()] + } + } + } + + /// Get a memory region that can be used to access the merged readonly section + pub fn get_ro_region(&self) -> MemoryRegion { + get_ro_region(&self.ro_section, self.elf_bytes.as_slice()) + } + + /// Get the entry point offset into the text section + pub fn get_entrypoint_instruction_offset(&self) -> usize { + self.entry_pc + } + + /// Get the text section offset + #[cfg(feature = "debugger")] + pub fn get_text_section_offset(&self) -> u64 { + self.text_section_info.offset_range.start as u64 + } + + /// Get the loader built-in program + pub fn get_loader(&self) -> &Arc> { + &self.loader + } + + /// Get the JIT compiled program + #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] + pub fn get_compiled_program(&self) -> Option<&JitProgram> { + self.compiled_program.as_ref() + } + + /// Verify the executable + pub fn verify(&self) -> Result<(), EbpfError> { + ::verify( + self.get_text_bytes().1, + self.get_config(), + self.get_sbpf_version(), + self.get_function_registry(), + )?; + Ok(()) + } + + /// JIT compile the executable + #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] + pub fn jit_compile(&mut self) -> Result<(), crate::error::EbpfError> { + let jit = JitCompiler::::new(self)?; + self.compiled_program = Some(jit.compile()?); + Ok(()) + } + + /// Get the function registry + pub fn get_function_registry(&self) -> &FunctionRegistry { + &self.function_registry + } + + /// Create from raw text section bytes (list of instructions) + pub fn new_from_text_bytes( + text_bytes: &[u8], + loader: Arc>, + sbpf_version: 
SBPFVersion, + mut function_registry: FunctionRegistry, + ) -> Result { + let elf_bytes = AlignedMemory::from_slice(text_bytes); + let config = loader.get_config(); + let enable_symbol_and_section_labels = config.enable_symbol_and_section_labels; + let entry_pc = if let Some((_name, pc)) = function_registry.lookup_by_name(b"entrypoint") { + pc + } else { + function_registry.register_function_hashed_legacy( + &loader, + !sbpf_version.static_syscalls(), + *b"entrypoint", + 0, + )?; + 0 + }; + Ok(Self { + elf_bytes, + sbpf_version, + ro_section: Section::Borrowed(0, 0..text_bytes.len()), + text_section_info: SectionInfo { + name: if enable_symbol_and_section_labels { + ".text".to_string() + } else { + String::default() + }, + vaddr: ebpf::MM_PROGRAM_START, + offset_range: 0..text_bytes.len(), + }, + entry_pc, + function_registry, + loader, + #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] + compiled_program: None, + }) + } + + /// Fully loads an ELF, including validation and relocation + pub fn load(bytes: &[u8], loader: Arc>) -> Result { + if loader.get_config().new_elf_parser { + // The new parser creates references from the input byte slice, so + // it must be properly aligned. We assume that HOST_ALIGN is a + // multiple of the ELF "natural" alignment. See test_load_unaligned. + let aligned; + let bytes = if is_memory_aligned(bytes.as_ptr() as usize, HOST_ALIGN) { + bytes + } else { + aligned = AlignedMemory::<{ HOST_ALIGN }>::from_slice(bytes); + aligned.as_slice() + }; + Self::load_with_parser(&NewParser::parse(bytes)?, bytes, loader) + } else { + Self::load_with_parser(&GoblinParser::parse(bytes)?, bytes, loader) + } + } + + fn load_with_parser<'a, P: ElfParser<'a>>( + elf: &'a P, + bytes: &[u8], + loader: Arc>, + ) -> Result { + let mut elf_bytes = AlignedMemory::from_slice(bytes); + let config = loader.get_config(); + let header = elf.header(); + let sbpf_version = if header.e_flags == EF_SBPF_V2 { + SBPFVersion::V2 + } else { + SBPFVersion::V1 + }; + + Self::validate(config, elf, elf_bytes.as_slice())?; + + // calculate the text section info + let text_section = elf.section(b".text")?; + let text_section_info = SectionInfo { + name: if config.enable_symbol_and_section_labels { + elf.section_name(text_section.sh_name()) + .and_then(|name| std::str::from_utf8(name).ok()) + .unwrap_or(".text") + .to_string() + } else { + String::default() + }, + vaddr: if sbpf_version.enable_elf_vaddr() + && text_section.sh_addr() >= ebpf::MM_PROGRAM_START + { + text_section.sh_addr() + } else { + text_section + .sh_addr() + .saturating_add(ebpf::MM_PROGRAM_START) + }, + offset_range: text_section.file_range().unwrap_or_default(), + }; + let vaddr_end = if sbpf_version.reject_rodata_stack_overlap() { + text_section_info + .vaddr + .saturating_add(text_section.sh_size()) + } else { + text_section_info.vaddr + }; + if (config.reject_broken_elfs + && !sbpf_version.enable_elf_vaddr() + && text_section.sh_addr() != text_section.sh_offset()) + || vaddr_end > ebpf::MM_STACK_START + { + return Err(ElfError::ValueOutOfBounds); + } + + // relocate symbols + let mut function_registry = FunctionRegistry::default(); + Self::relocate( + &mut function_registry, + &loader, + elf, + elf_bytes.as_slice_mut(), + )?; + + // calculate entrypoint offset into the text section + let offset = header.e_entry.saturating_sub(text_section.sh_addr()); + if offset.checked_rem(ebpf::INSN_SIZE as u64) != Some(0) { + return Err(ElfError::InvalidEntrypoint); + } + let entry_pc = if let Some(entry_pc) = 
(offset as usize).checked_div(ebpf::INSN_SIZE) { + if !sbpf_version.static_syscalls() { + function_registry.unregister_function(ebpf::hash_symbol_name(b"entrypoint")); + } + function_registry.register_function_hashed_legacy( + &loader, + !sbpf_version.static_syscalls(), + *b"entrypoint", + entry_pc, + )?; + entry_pc + } else { + return Err(ElfError::InvalidEntrypoint); + }; + + let ro_section = Self::parse_ro_sections( + config, + &sbpf_version, + elf.section_headers() + .map(|s| (elf.section_name(s.sh_name()), s)), + elf_bytes.as_slice(), + )?; + + Ok(Self { + elf_bytes, + sbpf_version, + ro_section, + text_section_info, + entry_pc, + function_registry, + loader, + #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] + compiled_program: None, + }) + } + + /// Calculate the total memory size of the executable + #[rustfmt::skip] + #[allow(clippy::size_of_ref)] + pub fn mem_size(&self) -> usize { + let mut total = mem::size_of::(); + total = total + // elf bytes + .saturating_add(self.elf_bytes.mem_size()) + // ro section + .saturating_add(match &self.ro_section { + Section::Owned(_, data) => data.capacity(), + Section::Borrowed(_, _) => 0, + }) + // text section info + .saturating_add(self.text_section_info.mem_size()) + // bpf functions + .saturating_add(self.function_registry.mem_size()); + + #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] + { + // compiled programs + total = total.saturating_add(self.compiled_program.as_ref().map_or(0, |program| program.mem_size())); + } + + total + } + + // Functions exposed for tests + + /// Validates the ELF + pub fn validate<'a, P: ElfParser<'a>>( + config: &Config, + elf: &'a P, + elf_bytes: &[u8], + ) -> Result<(), ElfError> { + let header = elf.header(); + if header.e_ident.ei_class != ELFCLASS64 { + return Err(ElfError::WrongClass); + } + if header.e_ident.ei_data != ELFDATA2LSB { + return Err(ElfError::WrongEndianess); + } + if header.e_ident.ei_osabi != ELFOSABI_NONE { + return Err(ElfError::WrongAbi); + } + if header.e_machine != EM_BPF && (!config.new_elf_parser || header.e_machine != EM_SBPF) { + return Err(ElfError::WrongMachine); + } + if header.e_type != ET_DYN { + return Err(ElfError::WrongType); + } + + let sbpf_version = if header.e_flags == EF_SBPF_V2 { + if !config.enable_sbpf_v2 { + return Err(ElfError::UnsupportedSBPFVersion); + } + SBPFVersion::V2 + } else { + if !config.enable_sbpf_v1 { + return Err(ElfError::UnsupportedSBPFVersion); + } + SBPFVersion::V1 + }; + + if sbpf_version.enable_elf_vaddr() { + if !config.optimize_rodata { + // When optimize_rodata=false, we allocate a vector and copy all + // rodata sections into it. In that case we can't allow virtual + // addresses or we'd potentially have to do huge allocations. + return Err(ElfError::UnsupportedSBPFVersion); + } + + // This is needed to avoid an overflow error in header.vm_range() as + // used by relocate(). See https://github.com/m4b/goblin/pull/306. + // + // Once we bump to a version of goblin that includes the fix, this + // check can be removed, and relocate() will still return + // ValueOutOfBounds on malformed program headers. + if elf + .program_headers() + .any(|header| header.p_vaddr().checked_add(header.p_memsz()).is_none()) + { + return Err(ElfError::InvalidProgramHeader); + } + + // The toolchain currently emits up to 4 program headers. 10 is a + // future proof nice round number. + // + // program_headers() returns an ExactSizeIterator so count doesn't + // actually iterate again. 
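// [Editorial sketch] The overflow guard described in the comment above can
// be illustrated stand-alone. `vm_range_would_overflow` is a hypothetical
// helper, not part of this crate: a program header whose p_vaddr + p_memsz
// wraps past u64::MAX is exactly the malformed case rejected with
// InvalidProgramHeader.
fn vm_range_would_overflow(p_vaddr: u64, p_memsz: u64) -> bool {
    p_vaddr.checked_add(p_memsz).is_none()
}

#[test]
fn vm_range_overflow_guard_sketch() {
    assert!(!vm_range_would_overflow(0x1000, 0x2000)); // well-formed header
    assert!(vm_range_would_overflow(u64::MAX, 1)); // wraps: must be rejected
}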
+ if elf.program_headers().count() >= 10 { + return Err(ElfError::InvalidProgramHeader); + } + } + + let num_text_sections = elf + .section_headers() + .fold(0, |count: usize, section_header| { + if let Some(this_name) = elf.section_name(section_header.sh_name()) { + if this_name == b".text" { + return count.saturating_add(1); + } + } + count + }); + if 1 != num_text_sections { + return Err(ElfError::NotOneTextSection); + } + + for section_header in elf.section_headers() { + if let Some(name) = elf.section_name(section_header.sh_name()) { + if name.starts_with(b".bss") + || (section_header.is_writable() + && (name.starts_with(b".data") && !name.starts_with(b".data.rel"))) + { + return Err(ElfError::WritableSectionNotSupported( + String::from_utf8_lossy(name).to_string(), + )); + } + } + } + + for section_header in elf.section_headers() { + let start = section_header.sh_offset() as usize; + let end = section_header + .sh_offset() + .checked_add(section_header.sh_size()) + .ok_or(ElfError::ValueOutOfBounds)? as usize; + let _ = elf_bytes + .get(start..end) + .ok_or(ElfError::ValueOutOfBounds)?; + } + let text_section = elf.section(b".text")?; + if !text_section.vm_range().contains(&header.e_entry) { + return Err(ElfError::EntrypointOutOfBounds); + } + + Ok(()) + } + + pub(crate) fn parse_ro_sections< + 'a, + T: ElfSectionHeader + 'a, + S: IntoIterator, &'a T)>, + >( + config: &Config, + sbpf_version: &SBPFVersion, + sections: S, + elf_bytes: &[u8], + ) -> Result { + // the lowest section address + let mut lowest_addr = usize::MAX; + // the highest section address + let mut highest_addr = 0; + // the aggregated section length, not including gaps between sections + let mut ro_fill_length = 0usize; + let mut invalid_offsets = false; + // when sbpf_version.enable_elf_vaddr()=true, we allow section_addr != sh_offset + // if section_addr - sh_offset is constant across all sections. That is, + // we allow sections to be translated by a fixed virtual offset. + let mut addr_file_offset = None; + + // keep track of where ro sections are so we can tell whether they're + // contiguous + let mut first_ro_section = 0; + let mut last_ro_section = 0; + let mut n_ro_sections = 0usize; + + let mut ro_slices = vec![]; + for (i, (name, section_header)) in sections.into_iter().enumerate() { + match name { + Some(name) + if name == b".text" + || name == b".rodata" + || name == b".data.rel.ro" + || name == b".eh_frame" => {} + _ => continue, + } + + if n_ro_sections == 0 { + first_ro_section = i; + } + last_ro_section = i; + n_ro_sections = n_ro_sections.saturating_add(1); + + let section_addr = section_header.sh_addr(); + + // sh_offset handling: + // + // If sbpf_version.enable_elf_vaddr()=true, we allow section_addr > + // sh_offset, if section_addr - sh_offset is constant across all + // sections. That is, we allow the linker to align rodata to a + // positive base address (MM_PROGRAM_START) as long as the mapping + // to sh_offset(s) stays linear. + // + // If sbpf_version.enable_elf_vaddr()=false, section_addr must match + // sh_offset for backwards compatibility + if !invalid_offsets { + if sbpf_version.enable_elf_vaddr() { + // This is enforced in validate() + debug_assert!(config.optimize_rodata); + if section_addr < section_header.sh_offset() { + invalid_offsets = true; + } else { + let offset = section_addr.saturating_sub(section_header.sh_offset()); + if *addr_file_offset.get_or_insert(offset) != offset { + // The sections are not all translated by the same + // constant. 
We won't be able to borrow, but unless + // config.reject_broken_elfs=true, we're still going + // to accept this file for backwards compatibility. + invalid_offsets = true; + } + } + } else if section_addr != section_header.sh_offset() { + invalid_offsets = true; + } + } + + let mut vaddr_end = + if sbpf_version.enable_elf_vaddr() && section_addr >= ebpf::MM_PROGRAM_START { + section_addr + } else { + section_addr.saturating_add(ebpf::MM_PROGRAM_START) + }; + if sbpf_version.reject_rodata_stack_overlap() { + vaddr_end = vaddr_end.saturating_add(section_header.sh_size()); + } + if (config.reject_broken_elfs && invalid_offsets) || vaddr_end > ebpf::MM_STACK_START { + return Err(ElfError::ValueOutOfBounds); + } + + let section_data = elf_bytes + .get(section_header.file_range().unwrap_or_default()) + .ok_or(ElfError::ValueOutOfBounds)?; + + let section_addr = section_addr as usize; + lowest_addr = lowest_addr.min(section_addr); + highest_addr = highest_addr.max(section_addr.saturating_add(section_data.len())); + ro_fill_length = ro_fill_length.saturating_add(section_data.len()); + + ro_slices.push((section_addr, section_data)); + } + + if config.reject_broken_elfs && lowest_addr.saturating_add(ro_fill_length) > highest_addr { + return Err(ElfError::ValueOutOfBounds); + } + + let can_borrow = !invalid_offsets + && last_ro_section + .saturating_add(1) + .saturating_sub(first_ro_section) + == n_ro_sections; + if sbpf_version.enable_elf_vaddr() && !can_borrow { + return Err(ElfError::ValueOutOfBounds); + } + let ro_section = if config.optimize_rodata && can_borrow { + // Read only sections are grouped together with no intermixed non-ro + // sections. We can borrow. + + // When sbpf_version.enable_elf_vaddr()=true, section addresses and their + // corresponding buffer offsets can be translated by a constant + // amount. Subtract the constant to get buffer positions. + let buf_offset_start = + lowest_addr.saturating_sub(addr_file_offset.unwrap_or(0) as usize); + let buf_offset_end = + highest_addr.saturating_sub(addr_file_offset.unwrap_or(0) as usize); + + let addr_offset = if lowest_addr >= ebpf::MM_PROGRAM_START as usize { + // The first field of Section::Borrowed is an offset from + // ebpf::MM_PROGRAM_START so if the linker has already put the + // sections within ebpf::MM_PROGRAM_START, we need to subtract + // it now. + lowest_addr.saturating_sub(ebpf::MM_PROGRAM_START as usize) + } else { + if sbpf_version.enable_elf_vaddr() { + return Err(ElfError::ValueOutOfBounds); + } + lowest_addr + }; + + Section::Borrowed(addr_offset, buf_offset_start..buf_offset_end) + } else { + // Read only and other non-ro sections are mixed. Zero the non-ro + // sections and copy the ro ones at their intended offsets. + + if config.optimize_rodata { + // The rodata region starts at MM_PROGRAM_START + offset, + // [MM_PROGRAM_START, MM_PROGRAM_START + offset) is not + // mappable. We only need to allocate highest_addr - lowest_addr + // bytes. + highest_addr = highest_addr.saturating_sub(lowest_addr); + } else { + // For backwards compatibility, the whole [MM_PROGRAM_START, + // MM_PROGRAM_START + highest_addr) range is mappable. We need + // to allocate the whole address range.
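// [Editorial sketch] The borrow condition above boils down to: every ro
// section must be translated from file offset to virtual address by one
// shared constant. `shared_translation` below is a hypothetical helper
// (not crate API) that mirrors the addr_file_offset bookkeeping.
fn shared_translation(sections: &[(u64, u64)]) -> Option<u64> {
    // Each tuple is (sh_addr, sh_offset); sh_offset > sh_addr is invalid.
    let mut delta: Option<u64> = None;
    for &(sh_addr, sh_offset) in sections {
        let d = sh_addr.checked_sub(sh_offset)?;
        if *delta.get_or_insert(d) != d {
            return None; // deltas differ: the file cannot be borrowed
        }
    }
    delta
}

#[test]
fn shared_translation_sketch() {
    assert_eq!(shared_translation(&[(110, 10), (120, 20)]), Some(100));
    assert_eq!(shared_translation(&[(110, 10), (130, 20)]), None);
}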
+ lowest_addr = 0; + }; + + let buf_len = highest_addr; + if buf_len > elf_bytes.len() { + return Err(ElfError::ValueOutOfBounds); + } + + let mut ro_section = vec![0; buf_len]; + for (section_addr, slice) in ro_slices.iter() { + let buf_offset_start = section_addr.saturating_sub(lowest_addr); + ro_section[buf_offset_start..buf_offset_start.saturating_add(slice.len())] + .copy_from_slice(slice); + } + + let addr_offset = if lowest_addr >= ebpf::MM_PROGRAM_START as usize { + lowest_addr.saturating_sub(ebpf::MM_PROGRAM_START as usize) + } else { + lowest_addr + }; + Section::Owned(addr_offset, ro_section) + }; + + Ok(ro_section) + } + + /// Relocates the ELF in-place + fn relocate<'a, P: ElfParser<'a>>( + function_registry: &mut FunctionRegistry, + loader: &BuiltinProgram, + elf: &'a P, + elf_bytes: &mut [u8], + ) -> Result<(), ElfError> { + let mut syscall_cache = BTreeMap::new(); + let text_section = elf.section(b".text")?; + let sbpf_version = if elf.header().e_flags == EF_SBPF_V2 { + SBPFVersion::V2 + } else { + SBPFVersion::V1 + }; + + // Fixup all program counter relative call instructions + let config = loader.get_config(); + let text_bytes = elf_bytes + .get_mut(text_section.file_range().unwrap_or_default()) + .ok_or(ElfError::ValueOutOfBounds)?; + let instruction_count = text_bytes + .len() + .checked_div(ebpf::INSN_SIZE) + .ok_or(ElfError::ValueOutOfBounds)?; + for i in 0..instruction_count { + let insn = ebpf::get_insn(text_bytes, i); + if insn.opc == ebpf::CALL_IMM + && insn.imm != -1 + && !(sbpf_version.static_syscalls() && insn.src == 0) + { + let target_pc = (i as isize) + .saturating_add(1) + .saturating_add(insn.imm as isize); + if target_pc < 0 || target_pc >= instruction_count as isize { + return Err(ElfError::RelativeJumpOutOfBounds(i)); + } + let name = if config.enable_symbol_and_section_labels { + format!("function_{target_pc}") + } else { + String::default() + }; + let key = function_registry.register_function_hashed_legacy( + loader, + !sbpf_version.static_syscalls(), + name.as_bytes(), + target_pc as usize, + )?; + let offset = i.saturating_mul(ebpf::INSN_SIZE).saturating_add(4); + let checked_slice = text_bytes + .get_mut(offset..offset.saturating_add(4)) + .ok_or(ElfError::ValueOutOfBounds)?; + LittleEndian::write_u32(checked_slice, key); + } + } + + let mut program_header: Option<&
<P as ElfParser<'a>
>::ProgramHeader> = None; + + // Fixup all the relocations in the relocation section if it exists + for relocation in elf.dynamic_relocations() { + let mut r_offset = relocation.r_offset() as usize; + + // When sbpf_version.enable_elf_vaddr()=true, we allow section.sh_addr != + // section.sh_offset so we need to bring r_offset to the correct + // byte offset. + if sbpf_version.enable_elf_vaddr() { + match program_header { + Some(header) if header.vm_range().contains(&(r_offset as u64)) => {} + _ => { + program_header = elf + .program_headers() + .find(|header| header.vm_range().contains(&(r_offset as u64))) + } + } + let header = program_header.as_ref().ok_or(ElfError::ValueOutOfBounds)?; + r_offset = r_offset + .saturating_sub(header.p_vaddr() as usize) + .saturating_add(header.p_offset() as usize); + } + + match BpfRelocationType::from_x86_relocation_type(relocation.r_type()) { + Some(BpfRelocationType::R_Bpf_64_64) => { + // Offset of the immediate field + let imm_offset = if text_section + .file_range() + .unwrap_or_default() + .contains(&r_offset) + || sbpf_version == SBPFVersion::V1 + { + r_offset.saturating_add(BYTE_OFFSET_IMMEDIATE) + } else { + r_offset + }; + + // Read the instruction's immediate field which contains the virtual + // address to convert to physical + let checked_slice = elf_bytes + .get(imm_offset..imm_offset.saturating_add(BYTE_LENGTH_IMMEDIATE)) + .ok_or(ElfError::ValueOutOfBounds)?; + let refd_addr = LittleEndian::read_u32(checked_slice) as u64; + + let symbol = elf + .dynamic_symbol(relocation.r_sym()) + .ok_or_else(|| ElfError::UnknownSymbol(relocation.r_sym() as usize))?; + + // The relocated address is relative to the address of the + // symbol at index `r_sym` + let mut addr = symbol.st_value().saturating_add(refd_addr); + + // The "physical address" from the VM's perspective is rooted + // at `MM_PROGRAM_START`. If the linker hasn't already put + // the symbol within `MM_PROGRAM_START`, we need to do so + // now. + if addr < ebpf::MM_PROGRAM_START { + addr = ebpf::MM_PROGRAM_START.saturating_add(addr); + } + + if text_section + .file_range() + .unwrap_or_default() + .contains(&r_offset) + || sbpf_version == SBPFVersion::V1 + { + let imm_low_offset = imm_offset; + let imm_high_offset = imm_low_offset.saturating_add(INSN_SIZE); + + // Write the low side of the relocated address + let imm_slice = elf_bytes + .get_mut( + imm_low_offset + ..imm_low_offset.saturating_add(BYTE_LENGTH_IMMEDIATE), + ) + .ok_or(ElfError::ValueOutOfBounds)?; + LittleEndian::write_u32(imm_slice, (addr & 0xFFFFFFFF) as u32); + + // Write the high side of the relocated address + let imm_slice = elf_bytes + .get_mut( + imm_high_offset + ..imm_high_offset.saturating_add(BYTE_LENGTH_IMMEDIATE), + ) + .ok_or(ElfError::ValueOutOfBounds)?; + LittleEndian::write_u32( + imm_slice, + addr.checked_shr(32).unwrap_or_default() as u32, + ); + } else { + let imm_slice = elf_bytes + .get_mut(imm_offset..imm_offset.saturating_add(8)) + .ok_or(ElfError::ValueOutOfBounds)?; + LittleEndian::write_u64(imm_slice, addr); + } + } + Some(BpfRelocationType::R_Bpf_64_Relative) => { + // Relocation between different sections, where the target + // memory is not associated to a symbol (eg some compiler + // generated rodata that doesn't have an explicit symbol).
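// [Editorial sketch] Both relocation arms here ultimately store a 64-bit
// address as two little-endian u32 immediates, one per lddw instruction
// slot. These hypothetical helpers (not crate API) show the round trip.
fn split_lddw_address(addr: u64) -> (u32, u32) {
    ((addr & 0xFFFF_FFFF) as u32, (addr >> 32) as u32)
}

fn join_lddw_address(va_low: u32, va_high: u32) -> u64 {
    ((va_high as u64) << 32) | va_low as u64
}

#[test]
fn lddw_address_roundtrip_sketch() {
    let addr = 0x1_0000_0042u64;
    let (low, high) = split_lddw_address(addr);
    assert_eq!(join_lddw_address(low, high), addr);
}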
+ + // Offset of the immediate field + let imm_offset = r_offset.saturating_add(BYTE_OFFSET_IMMEDIATE); + + if text_section + .file_range() + .unwrap_or_default() + .contains(&r_offset) + { + // We're relocating a lddw instruction, which spans two + // instruction slots. The address to be relocated is + // split in two halves in the two imms of the + // instruction slots. + let imm_low_offset = imm_offset; + let imm_high_offset = r_offset + .saturating_add(INSN_SIZE) + .saturating_add(BYTE_OFFSET_IMMEDIATE); + + // Read the low side of the address + let imm_slice = elf_bytes + .get( + imm_low_offset + ..imm_low_offset.saturating_add(BYTE_LENGTH_IMMEDIATE), + ) + .ok_or(ElfError::ValueOutOfBounds)?; + let va_low = LittleEndian::read_u32(imm_slice) as u64; + + // Read the high side of the address + let imm_slice = elf_bytes + .get( + imm_high_offset + ..imm_high_offset.saturating_add(BYTE_LENGTH_IMMEDIATE), + ) + .ok_or(ElfError::ValueOutOfBounds)?; + let va_high = LittleEndian::read_u32(imm_slice) as u64; + + // Put the address back together + let mut refd_addr = va_high.checked_shl(32).unwrap_or_default() | va_low; + + if refd_addr == 0 { + return Err(ElfError::InvalidVirtualAddress(refd_addr)); + } + + if refd_addr < ebpf::MM_PROGRAM_START { + // The linker hasn't already placed rodata within + // MM_PROGRAM_START, so we do so now + refd_addr = ebpf::MM_PROGRAM_START.saturating_add(refd_addr); + } + + // Write back the low half + let imm_slice = elf_bytes + .get_mut( + imm_low_offset + ..imm_low_offset.saturating_add(BYTE_LENGTH_IMMEDIATE), + ) + .ok_or(ElfError::ValueOutOfBounds)?; + LittleEndian::write_u32(imm_slice, (refd_addr & 0xFFFFFFFF) as u32); + + // Write back the high half + let imm_slice = elf_bytes + .get_mut( + imm_high_offset + ..imm_high_offset.saturating_add(BYTE_LENGTH_IMMEDIATE), + ) + .ok_or(ElfError::ValueOutOfBounds)?; + LittleEndian::write_u32( + imm_slice, + refd_addr.checked_shr(32).unwrap_or_default() as u32, + ); + } else { + let refd_addr = if sbpf_version != SBPFVersion::V1 { + // We're relocating an address inside a data section (eg .rodata). The + // address is encoded as a simple u64. + + let addr_slice = elf_bytes + .get(r_offset..r_offset.saturating_add(mem::size_of::())) + .ok_or(ElfError::ValueOutOfBounds)?; + let mut refd_addr = LittleEndian::read_u64(addr_slice); + if refd_addr < ebpf::MM_PROGRAM_START { + // Not within MM_PROGRAM_START, do it now + refd_addr = ebpf::MM_PROGRAM_START.saturating_add(refd_addr); + } + refd_addr + } else { + // There used to be a bug in toolchains before + // https://github.com/solana-labs/llvm-project/pull/35 where for 64 bit + // relocations we were encoding only the low 32 bits, shifted 32 bits to + // the left. Our relocation code used to be compatible with that, so we + // need to keep supporting this case for backwards compatibility. + let addr_slice = elf_bytes + .get(imm_offset..imm_offset.saturating_add(BYTE_LENGTH_IMMEDIATE)) + .ok_or(ElfError::ValueOutOfBounds)?; + let refd_addr = LittleEndian::read_u32(addr_slice) as u64; + ebpf::MM_PROGRAM_START.saturating_add(refd_addr) + }; + + let addr_slice = elf_bytes + .get_mut(r_offset..r_offset.saturating_add(mem::size_of::())) + .ok_or(ElfError::ValueOutOfBounds)?; + LittleEndian::write_u64(addr_slice, refd_addr); + } + } + Some(BpfRelocationType::R_Bpf_64_32) => { + // The .text section has an unresolved call to symbol instruction + // Hash the symbol name and stick it into the call instruction's imm + // field. 
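// [Editorial sketch] The hash write described above patches the 4-byte
// immediate that lives at byte offset 4 (BYTE_OFFSET_IMMEDIATE) of each
// 8-byte (INSN_SIZE) instruction. `patch_call_imm` is a hypothetical
// helper, not crate API.
fn patch_call_imm(text_bytes: &mut [u8], insn_index: usize, key: u32) {
    let imm_offset = insn_index * 8 + 4;
    text_bytes[imm_offset..imm_offset + 4].copy_from_slice(&key.to_le_bytes());
}

#[test]
fn patch_call_imm_sketch() {
    let mut text = [0u8; 16]; // two instructions
    patch_call_imm(&mut text, 1, 0xAABB_CCDD);
    assert_eq!(&text[12..16], &0xAABB_CCDDu32.to_le_bytes());
}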
Later that hash will be used to look up the function location. + + // Offset of the immediate field + let imm_offset = r_offset.saturating_add(BYTE_OFFSET_IMMEDIATE); + + let symbol = elf + .dynamic_symbol(relocation.r_sym()) + .ok_or_else(|| ElfError::UnknownSymbol(relocation.r_sym() as usize))?; + + let name = elf + .dynamic_symbol_name(symbol.st_name() as Elf64Word) + .ok_or_else(|| ElfError::UnknownSymbol(symbol.st_name() as usize))?; + + // If the symbol is defined, this is a bpf-to-bpf call + let key = if symbol.is_function() && symbol.st_value() != 0 { + if !text_section.vm_range().contains(&symbol.st_value()) { + return Err(ElfError::ValueOutOfBounds); + } + let target_pc = (symbol.st_value().saturating_sub(text_section.sh_addr()) + as usize) + .checked_div(ebpf::INSN_SIZE) + .unwrap_or_default(); + function_registry.register_function_hashed_legacy( + loader, + !sbpf_version.static_syscalls(), + name, + target_pc, + )? + } else { + // Else it's a syscall + let hash = *syscall_cache + .entry(symbol.st_name()) + .or_insert_with(|| ebpf::hash_symbol_name(name)); + if config.reject_broken_elfs + && loader.get_function_registry().lookup_by_key(hash).is_none() + { + return Err(ElfError::UnresolvedSymbol( + String::from_utf8_lossy(name).to_string(), + r_offset.checked_div(ebpf::INSN_SIZE).unwrap_or(0), + r_offset, + )); + } + hash + }; + + let checked_slice = elf_bytes + .get_mut(imm_offset..imm_offset.saturating_add(BYTE_LENGTH_IMMEDIATE)) + .ok_or(ElfError::ValueOutOfBounds)?; + LittleEndian::write_u32(checked_slice, key); + } + _ => return Err(ElfError::UnknownRelocation(relocation.r_type())), + } + } + + if config.enable_symbol_and_section_labels { + // Register all known function names from the symbol table + for symbol in elf.symbols() { + if symbol.st_info() & 0xEF != 0x02 { + continue; + } + if !text_section.vm_range().contains(&symbol.st_value()) { + return Err(ElfError::ValueOutOfBounds); + } + let target_pc = (symbol.st_value().saturating_sub(text_section.sh_addr()) as usize) + .checked_div(ebpf::INSN_SIZE) + .unwrap_or_default(); + let name = elf + .symbol_name(symbol.st_name() as Elf64Word) + .ok_or_else(|| ElfError::UnknownSymbol(symbol.st_name() as usize))?; + function_registry.register_function_hashed_legacy( + loader, + !sbpf_version.static_syscalls(), + name, + target_pc, + )?; + } + } + + Ok(()) + } + + #[allow(dead_code)] + fn dump_data(name: &str, prog: &[u8]) { + let mut eight_bytes: Vec = Vec::new(); + println!("{name}"); + for i in prog.iter() { + if eight_bytes.len() >= 7 { + println!("{eight_bytes:02X?}"); + eight_bytes.clear(); + } else { + eight_bytes.push(*i); + } + } + } +} + +pub(crate) fn get_ro_region(ro_section: &Section, elf: &[u8]) -> MemoryRegion { + let (offset, ro_data) = match ro_section { + Section::Owned(offset, data) => (*offset, data.as_slice()), + Section::Borrowed(offset, byte_range) => (*offset, &elf[byte_range.clone()]), + }; + + // If offset > 0, the region will start at MM_PROGRAM_START + the offset of + // the first read only byte. [MM_PROGRAM_START, MM_PROGRAM_START + offset) + // will be unmappable, see MemoryRegion::vm_to_host. 
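// [Editorial sketch] The mapping rule stated above, as a stand-alone
// function with hypothetical names (the real logic lives in
// MemoryRegion::vm_to_host): addresses below the region base or past its
// end are unmappable.
fn vm_to_host_sketch(vm_base: u64, len: u64, host_base: u64, vm_addr: u64) -> Option<u64> {
    let rel = vm_addr.checked_sub(vm_base)?; // below the base: unmappable
    if rel >= len {
        return None; // past the end: unmappable
    }
    host_base.checked_add(rel)
}

#[test]
fn vm_to_host_sketch_behaves() {
    assert_eq!(vm_to_host_sketch(0x100, 0x10, 0x8000, 0x108), Some(0x8008));
    assert_eq!(vm_to_host_sketch(0x100, 0x10, 0x8000, 0xFF), None);
    assert_eq!(vm_to_host_sketch(0x100, 0x10, 0x8000, 0x110), None);
}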
+ MemoryRegion::new_readonly( + ro_data, + ebpf::MM_PROGRAM_START.saturating_add(offset as u64), + ) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + ebpf, + elf_parser::{ + // FIXME consts::{ELFCLASS32, ELFDATA2MSB, ET_REL}, + consts::{ELFCLASS32, ELFDATA2MSB, ET_REL}, + types::{Elf64Ehdr, Elf64Shdr}, + SECTION_NAME_LENGTH_MAXIMUM, + }, + error::ProgramResult, + fuzz::fuzz, + program::BuiltinFunction, + syscalls, + vm::TestContextObject, + }; + use rand::{distributions::Uniform, Rng}; + use std::{fs::File, io::Read}; + use test_utils::assert_error; + type ElfExecutable = Executable; + + fn loader() -> Arc> { + let mut function_registry = + FunctionRegistry::>::default(); + function_registry + .register_function_hashed(*b"log", syscalls::SyscallString::vm) + .unwrap(); + function_registry + .register_function_hashed(*b"log_64", syscalls::SyscallU64::vm) + .unwrap(); + Arc::new(BuiltinProgram::new_loader( + Config::default(), + function_registry, + )) + } + + #[test] + fn test_validate() { + let elf_bytes = std::fs::read("tests/elfs/relative_call.so").unwrap(); + let elf = NewParser::parse(&elf_bytes).unwrap(); + let mut header = elf.header().clone(); + + let config = Config::default(); + + let write_header = |header: Elf64Ehdr| unsafe { + let mut bytes = elf_bytes.clone(); + std::ptr::write(bytes.as_mut_ptr() as *mut Elf64Ehdr, header); + bytes + }; + + ElfExecutable::validate(&config, &elf, &elf_bytes).expect("validation failed"); + + header.e_ident.ei_class = ELFCLASS32; + let bytes = write_header(header.clone()); + // the new parser rejects anything other than ELFCLASS64 directly + NewParser::parse(&bytes).expect_err("allowed bad class"); + ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes) + .expect_err("allowed bad class"); + + header.e_ident.ei_class = ELFCLASS64; + let bytes = write_header(header.clone()); + ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes) + .expect("validation failed"); + ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes) + .expect("validation failed"); + + header.e_ident.ei_data = ELFDATA2MSB; + let bytes = write_header(header.clone()); + // the new parser only supports little endian + NewParser::parse(&bytes).expect_err("allowed big endian"); + + header.e_ident.ei_data = ELFDATA2LSB; + let bytes = write_header(header.clone()); + ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes) + .expect("validation failed"); + ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes) + .expect("validation failed"); + + header.e_ident.ei_osabi = 1; + let bytes = write_header(header.clone()); + ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes) + .expect_err("allowed wrong abi"); + ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes) + .expect_err("allowed wrong abi"); + + header.e_ident.ei_osabi = ELFOSABI_NONE; + let bytes = write_header(header.clone()); + ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes) + .expect("validation failed"); + ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes) + .expect("validation failed"); + + header.e_machine = 42; + let bytes = write_header(header.clone()); + ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes) + .expect_err("allowed wrong machine"); + ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), 
&elf_bytes) + .expect_err("allowed wrong machine"); + + header.e_machine = EM_BPF; + let bytes = write_header(header.clone()); + ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes) + .expect("validation failed"); + ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes) + .expect("validation failed"); + + header.e_type = ET_REL; + let bytes = write_header(header); + ElfExecutable::validate(&config, &NewParser::parse(&bytes).unwrap(), &elf_bytes) + .expect_err("allowed wrong type"); + ElfExecutable::validate(&config, &GoblinParser::parse(&bytes).unwrap(), &elf_bytes) + .expect_err("allowed wrong type"); + } + + #[test] + fn test_load() { + let mut file = File::open("tests/elfs/relative_call.so").expect("file open failed"); + let mut elf_bytes = Vec::new(); + file.read_to_end(&mut elf_bytes) + .expect("failed to read elf file"); + ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); + } + + #[test] + fn test_load_unaligned() { + let mut elf_bytes = + std::fs::read("tests/elfs/relative_call.so").expect("failed to read elf file"); + // The default allocator allocates aligned memory. Move the ELF slice to + // elf_bytes.as_ptr() + 1 to make it unaligned and test unaligned + // parsing. + elf_bytes.insert(0, 0); + ElfExecutable::load(&elf_bytes[1..], loader()).expect("validation failed"); + } + + #[test] + fn test_entrypoint() { + let loader = loader(); + + let mut file = File::open("tests/elfs/syscall_static.so").expect("file open failed"); + let mut elf_bytes = Vec::new(); + file.read_to_end(&mut elf_bytes) + .expect("failed to read elf file"); + let elf = ElfExecutable::load(&elf_bytes, loader.clone()).expect("validation failed"); + let parsed_elf = NewParser::parse(&elf_bytes).unwrap(); + let executable: &Executable = &elf; + assert_eq!(0, executable.get_entrypoint_instruction_offset()); + + let write_header = |header: Elf64Ehdr| unsafe { + let mut bytes = elf_bytes.clone(); + std::ptr::write(bytes.as_mut_ptr() as *mut Elf64Ehdr, header); + bytes + }; + + let mut header = parsed_elf.header().clone(); + let initial_e_entry = header.e_entry; + + header.e_entry += 8; + let elf_bytes = write_header(header.clone()); + let elf = ElfExecutable::load(&elf_bytes, loader.clone()).expect("validation failed"); + let executable: &Executable = &elf; + assert_eq!(1, executable.get_entrypoint_instruction_offset()); + + header.e_entry = 1; + let elf_bytes = write_header(header.clone()); + assert!(matches!( + ElfExecutable::load(&elf_bytes, loader.clone()), + Err(ElfError::EntrypointOutOfBounds) + )); + + header.e_entry = u64::MAX; + let elf_bytes = write_header(header.clone()); + assert!(matches!( + ElfExecutable::load(&elf_bytes, loader.clone()), + Err(ElfError::EntrypointOutOfBounds) + )); + + header.e_entry = initial_e_entry + ebpf::INSN_SIZE as u64 + 1; + let elf_bytes = write_header(header.clone()); + assert!(matches!( + ElfExecutable::load(&elf_bytes, loader.clone()), + Err(ElfError::InvalidEntrypoint) + )); + + header.e_entry = initial_e_entry; + let elf_bytes = write_header(header); + let elf = ElfExecutable::load(&elf_bytes, loader).expect("validation failed"); + let executable: &Executable = &elf; + assert_eq!(0, executable.get_entrypoint_instruction_offset()); + } + + #[test] + #[ignore] + fn test_fuzz_load() { + let loader = loader(); + + // Random bytes, will mostly fail due to lack of ELF header so just do a few + let mut rng = rand::thread_rng(); + let range = Uniform::new(0, 255); + println!("random bytes"); + for _ in 
0..1_000 { + let elf_bytes: Vec = (0..100).map(|_| rng.sample(range)).collect(); + let _ = ElfExecutable::load(&elf_bytes, loader.clone()); + } + + // Take a real elf and mangle it + + let mut file = File::open("tests/elfs/noop.so").expect("file open failed"); + let mut elf_bytes = Vec::new(); + file.read_to_end(&mut elf_bytes) + .expect("failed to read elf file"); + let parsed_elf = NewParser::parse(&elf_bytes).unwrap(); + + // focus on elf header, small typically 64 bytes + println!("mangle elf header"); + fuzz( + &elf_bytes, + 1_000_000, + 100, + 0..parsed_elf.header().e_ehsize as usize, + 0..255, + |bytes: &mut [u8]| { + let _ = ElfExecutable::load(bytes, loader.clone()); + }, + ); + + // focus on section headers + println!("mangle section headers"); + fuzz( + &elf_bytes, + 1_000_000, + 100, + parsed_elf.header().e_shoff as usize..elf_bytes.len(), + 0..255, + |bytes: &mut [u8]| { + let _ = ElfExecutable::load(bytes, loader.clone()); + }, + ); + + // mangle whole elf randomly + println!("mangle whole elf"); + fuzz( + &elf_bytes, + 1_000_000, + 100, + 0..elf_bytes.len(), + 0..255, + |bytes: &mut [u8]| { + let _ = ElfExecutable::load(bytes, loader.clone()); + }, + ); + } + + fn new_section(sh_addr: u64, sh_size: u64) -> Elf64Shdr { + Elf64Shdr { + sh_addr, + sh_offset: sh_addr + .checked_sub(ebpf::MM_PROGRAM_START) + .unwrap_or(sh_addr), + sh_size, + sh_name: 0, + sh_type: 0, + sh_flags: 0, + sh_link: 0, + sh_info: 0, + sh_addralign: 0, + sh_entsize: 0, + } + } + + #[test] + fn test_owned_ro_sections_not_contiguous() { + let config = Config::default(); + let elf_bytes = [0u8; 512]; + + // there's a non-rodata section between two rodata sections + let s1 = new_section(10, 10); + let s2 = new_section(20, 10); + let s3 = new_section(30, 10); + + let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ + (Some(b".text"), &s1), + (Some(b".dynamic"), &s2), + (Some(b".rodata"), &s3), + ]; + assert!(matches!( + ElfExecutable::parse_ro_sections( + &config, + &SBPFVersion::V1, + sections, + &elf_bytes, + ), + Ok(Section::Owned(offset, data)) if offset == 10 && data.len() == 30 + )); + } + + #[test] + fn test_owned_ro_sections_with_sh_offset() { + let config = Config { + reject_broken_elfs: false, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + // s2 is at a custom sh_offset. We need to merge into an owned buffer so + // s2 can be moved to the right address offset. 
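// [Editorial sketch] The owned-buffer merge this test exercises: each ro
// section is copied at (sh_addr - lowest_addr) and the gaps stay zeroed.
// `merge_owned` is a hypothetical helper, not the crate's API.
fn merge_owned(lowest_addr: usize, sections: &[(usize, &[u8])], buf_len: usize) -> Vec<u8> {
    let mut buf = vec![0u8; buf_len];
    for &(sh_addr, data) in sections {
        let start = sh_addr - lowest_addr;
        buf[start..start + data.len()].copy_from_slice(data);
    }
    buf
}

#[test]
fn merge_owned_sketch() {
    let merged = merge_owned(10, &[(10, b"aa".as_slice()), (20, b"bb".as_slice())], 12);
    assert_eq!(&merged[0..2], b"aa");
    assert_eq!(&merged[10..12], b"bb");
    assert!(merged[2..10].iter().all(|&b| b == 0)); // gap stays zeroed
}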
+ let s1 = new_section(10, 10); + let mut s2 = new_section(20, 10); + s2.sh_offset = 30; + + let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = + [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; + assert!(matches!( + ElfExecutable::parse_ro_sections( + &config, + &SBPFVersion::V1, + sections, + &elf_bytes, + ), + Ok(Section::Owned(offset, data)) if offset == 10 && data.len() == 20 + )); + } + + #[test] + fn test_sh_offset_not_same_as_vaddr() { + let config = Config { + reject_broken_elfs: true, + enable_sbpf_v2: false, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + let mut s1 = new_section(10, 10); + + { + let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; + assert!(ElfExecutable::parse_ro_sections( + &config, + &SBPFVersion::V1, + sections, + &elf_bytes + ) + .is_ok()); + } + + s1.sh_offset = 0; + let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V1, sections, &elf_bytes), + Err(ElfError::ValueOutOfBounds) + ); + } + + #[test] + fn test_invalid_sh_offset_larger_than_vaddr() { + let config = Config { + reject_broken_elfs: true, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + let s1 = new_section(10, 10); + // sh_offset > sh_addr is invalid + let mut s2 = new_section(20, 10); + s2.sh_offset = 30; + + let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = + [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes,), + Err(ElfError::ValueOutOfBounds) + ); + } + + #[test] + fn test_reject_non_constant_sh_offset() { + let config = Config { + reject_broken_elfs: true, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + let mut s1 = new_section(ebpf::MM_PROGRAM_START + 10, 10); + let mut s2 = new_section(ebpf::MM_PROGRAM_START + 20, 10); + // The sections don't have a constant offset. 
This is rejected since it + // makes it impossible to efficiently map virtual addresses to byte + // offsets + s1.sh_offset = 100; + s2.sh_offset = 120; + + let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = + [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes), + Err(ElfError::ValueOutOfBounds) + ); + } + + #[test] + fn test_borrowed_ro_sections_with_constant_sh_offset() { + let config = Config { + reject_broken_elfs: true, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + let mut s1 = new_section(ebpf::MM_PROGRAM_START + 10, 10); + let mut s2 = new_section(ebpf::MM_PROGRAM_START + 20, 10); + // the sections have a constant offset (100) + s1.sh_offset = 100; + s2.sh_offset = 110; + + let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = + [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes), + Ok(Section::Borrowed(10, 100..120)) + ); + } + + #[test] + fn test_owned_ro_region_no_initial_gap() { + let config = Config::default(); + let elf_bytes = [0u8; 512]; + + // need an owned buffer so we can zero the address space taken by s2 + let s1 = new_section(0, 10); + let s2 = new_section(10, 10); + let s3 = new_section(20, 10); + + let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ + (Some(b".text"), &s1), + (Some(b".dynamic"), &s2), + (Some(b".rodata"), &s3), + ]; + let ro_section = + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V1, sections, &elf_bytes) + .unwrap(); + let ro_region = get_ro_region(&ro_section, &elf_bytes); + let owned_section = match &ro_section { + Section::Owned(_offset, data) => data.as_slice(), + _ => panic!(), + }; + + // [0..s3.sh_addr + s3.sh_size] is the valid ro memory area + assert!(matches!( + ro_region.vm_to_host(ebpf::MM_PROGRAM_START, s3.sh_addr + s3.sh_size), + ProgramResult::Ok(ptr) if ptr == owned_section.as_ptr() as u64, + )); + + // one byte past the ro section is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s3.sh_addr + s3.sh_size, 1), + "InvalidVirtualAddress({})", + ebpf::MM_PROGRAM_START + s3.sh_addr + s3.sh_size + ); + } + + #[test] + fn test_owned_ro_region_initial_gap_mappable() { + let config = Config { + optimize_rodata: false, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + // the first section starts at a non-zero offset + let s1 = new_section(10, 10); + let s2 = new_section(20, 10); + let s3 = new_section(30, 10); + + let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ + (Some(b".text"), &s1), + (Some(b".dynamic"), &s2), + (Some(b".rodata"), &s3), + ]; + // V2 requires optimize_rodata=true + let ro_section = + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V1, sections, &elf_bytes) + .unwrap(); + let ro_region = get_ro_region(&ro_section, &elf_bytes); + let owned_section = match &ro_section { + Section::Owned(_offset, data) => data.as_slice(), + _ => panic!(), + }; + + // [s1.sh_addr..s3.sh_addr + s3.sh_size] is where the readonly data is. + // But for backwards compatibility (config.optimize_rodata=false) + // [0..s1.sh_addr] is mappable too (and zeroed). 
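// [Editorial sketch] The sizing rule this test and the previous one
// contrast, as a hypothetical helper: with optimize_rodata=true only
// [lowest_addr, highest_addr) is allocated; with optimize_rodata=false
// the buffer starts at vaddr 0, so the initial gap is mappable and zeroed.
fn owned_buf_len(optimize_rodata: bool, lowest_addr: usize, highest_addr: usize) -> usize {
    if optimize_rodata {
        highest_addr.saturating_sub(lowest_addr)
    } else {
        highest_addr
    }
}

#[test]
fn owned_buf_len_sketch() {
    assert_eq!(owned_buf_len(true, 10, 40), 30); // gap below 10 unmappable
    assert_eq!(owned_buf_len(false, 10, 40), 40); // gap kept, zeroed
}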
+ assert!(matches!( + ro_region.vm_to_host(ebpf::MM_PROGRAM_START, s3.sh_addr + s3.sh_size), + ProgramResult::Ok(ptr) if ptr == owned_section.as_ptr() as u64, + )); + + // one byte past the ro section is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s3.sh_addr + s3.sh_size, 1), + "InvalidVirtualAddress({})", + ebpf::MM_PROGRAM_START + s3.sh_addr + s3.sh_size + ); + } + + #[test] + fn test_owned_ro_region_initial_gap_map_error() { + let config = Config::default(); + let elf_bytes = [0u8; 512]; + + // the first section starts at a non-zero offset + let s1 = new_section(10, 10); + let s2 = new_section(20, 10); + let s3 = new_section(30, 10); + + let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ + (Some(b".text"), &s1), + (Some(b".dynamic"), &s2), + (Some(b".rodata"), &s3), + ]; + let ro_section = + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V1, sections, &elf_bytes) + .unwrap(); + let owned_section = match &ro_section { + Section::Owned(_offset, data) => data.as_slice(), + _ => panic!(), + }; + let ro_region = get_ro_region(&ro_section, &elf_bytes); + + // s1 starts at sh_addr=10 so [MM_PROGRAM_START..MM_PROGRAM_START + 10] is not mappable + + // the low bound of the initial gap is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_PROGRAM_START, 1), + "InvalidVirtualAddress({})", + ebpf::MM_PROGRAM_START + ); + + // the hi bound of the initial gap is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s1.sh_addr - 1, 1), + "InvalidVirtualAddress({})", + ebpf::MM_PROGRAM_START + 9 + ); + + // [s1.sh_addr..s3.sh_addr + s3.sh_size] is the valid ro memory area + assert!(matches!( + ro_region.vm_to_host( + ebpf::MM_PROGRAM_START + s1.sh_addr, + s3.sh_addr + s3.sh_size - s1.sh_addr + ), + ProgramResult::Ok(ptr) if ptr == owned_section.as_ptr() as u64, + )); + + // one byte past the ro section is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s3.sh_addr + s3.sh_size, 1), + "InvalidVirtualAddress({})", + ebpf::MM_PROGRAM_START + s3.sh_addr + s3.sh_size + ); + } + + #[test] + fn test_borrowed_ro_sections_disabled() { + let config = Config { + optimize_rodata: false, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + // s1 and s2 are contiguous, the rodata section can be borrowed from the + // original elf input but config.borrow_rodata=false + let s1 = new_section(0, 10); + let s2 = new_section(10, 10); + + let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = + [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; + assert!(matches!( + ElfExecutable::parse_ro_sections( + &config, + &SBPFVersion::V1, // v2 requires optimize_rodata=true + sections, + &elf_bytes, + ), + Ok(Section::Owned(offset, data)) if offset == 0 && data.len() == 20 + )); + } + + #[test] + fn test_borrowed_ro_sections() { + let config = Config::default(); + let elf_bytes = [0u8; 512]; + for (vaddr_base, sbpf_version) in [ + (0, SBPFVersion::V1), + (ebpf::MM_PROGRAM_START, SBPFVersion::V2), + ] { + let s1 = new_section(vaddr_base, 10); + let s2 = new_section(vaddr_base + 20, 10); + let s3 = new_section(vaddr_base + 40, 10); + let s4 = new_section(vaddr_base + 50, 10); + let sections: [(Option<&[u8]>, &Elf64Shdr); 4] = [ + (Some(b".dynsym"), &s1), + (Some(b".text"), &s2), + (Some(b".rodata"), &s3), + (Some(b".dynamic"), &s4), + ]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &sbpf_version, sections, &elf_bytes), + Ok(Section::Borrowed(20, 20..50)) + ); + } + } + + #[test] + fn 
test_borrowed_ro_region_no_initial_gap() { + let config = Config::default(); + let elf_bytes = [0u8; 512]; + for (vaddr_base, sbpf_version) in [ + (0, SBPFVersion::V1), + (ebpf::MM_PROGRAM_START, SBPFVersion::V2), + ] { + let s1 = new_section(vaddr_base, 10); + let s2 = new_section(vaddr_base + 10, 10); + let s3 = new_section(vaddr_base + 20, 10); + let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ + (Some(b".text"), &s1), + (Some(b".rodata"), &s2), + (Some(b".dynamic"), &s3), + ]; + let ro_section = + ElfExecutable::parse_ro_sections(&config, &sbpf_version, sections, &elf_bytes) + .unwrap(); + let ro_region = get_ro_region(&ro_section, &elf_bytes); + + // s1 starts at sh_offset=0 so [0..s2.sh_offset + s2.sh_size] + // is the valid ro memory area + assert!(matches!( + ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s1.sh_offset, s2.sh_offset + s2.sh_size), + ProgramResult::Ok(ptr) if ptr == elf_bytes.as_ptr() as u64, + )); + + // one byte past the ro section is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s3.sh_offset, 1), + "InvalidVirtualAddress({})", + ebpf::MM_PROGRAM_START + s3.sh_offset + ); + } + } + + #[test] + fn test_borrowed_ro_region_initial_gap() { + let config = Config::default(); + let elf_bytes = [0u8; 512]; + for (vaddr_base, sbpf_version) in [ + (0, SBPFVersion::V1), + (ebpf::MM_PROGRAM_START, SBPFVersion::V2), + ] { + let s1 = new_section(vaddr_base, 10); + let s2 = new_section(vaddr_base + 10, 10); + let s3 = new_section(vaddr_base + 20, 10); + let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ + (Some(b".dynamic"), &s1), + (Some(b".text"), &s2), + (Some(b".rodata"), &s3), + ]; + let ro_section = + ElfExecutable::parse_ro_sections(&config, &sbpf_version, sections, &elf_bytes) + .unwrap(); + let ro_region = get_ro_region(&ro_section, &elf_bytes); + + // s2 starts at sh_addr=10 so [0..10] is not mappable + + // the low bound of the initial gap is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s1.sh_offset, 1), + "InvalidVirtualAddress({})", + ebpf::MM_PROGRAM_START + s1.sh_offset + ); + + // the hi bound of the initial gap is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s2.sh_offset - 1, 1), + "InvalidVirtualAddress({})", + ebpf::MM_PROGRAM_START + s2.sh_offset - 1 + ); + + // [s2.sh_offset..s3.sh_offset + s3.sh_size] is the valid ro memory area + assert!(matches!( + ro_region.vm_to_host( + ebpf::MM_PROGRAM_START + s2.sh_offset, + s3.sh_offset + s3.sh_size - s2.sh_offset + ), + ProgramResult::Ok(ptr) if ptr == elf_bytes[s2.sh_offset as usize..].as_ptr() as u64, + )); + + // one byte past the ro section is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s3.sh_offset + s3.sh_size, 1), + "InvalidVirtualAddress({})", + ebpf::MM_PROGRAM_START + s3.sh_offset + s3.sh_size + ); + } + } + + #[test] + fn test_reject_rodata_stack_overlap() { + let config = Config { + enable_sbpf_v2: true, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + // no overlap + let mut s1 = new_section(ebpf::MM_STACK_START - 10, 10); + s1.sh_offset = 0; + let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; + assert!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes) + .is_ok() + ); + + // no overlap + let mut s1 = new_section(ebpf::MM_STACK_START, 0); + s1.sh_offset = 0; + let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; + assert!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, 
sections, &elf_bytes) + .is_ok() + ); + + // overlap + let mut s1 = new_section(ebpf::MM_STACK_START, 1); + s1.sh_offset = 0; + let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes), + Err(ElfError::ValueOutOfBounds) + ); + + // valid start but start + size overlap + let mut s1 = new_section(ebpf::MM_STACK_START - 10, 11); + s1.sh_offset = 0; + let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes), + Err(ElfError::ValueOutOfBounds) + ); + } + + #[test] + #[should_panic(expected = r#"validation failed: WritableSectionNotSupported(".data")"#)] + fn test_writable_data_section() { + let elf_bytes = + std::fs::read("tests/elfs/data_section.so").expect("failed to read elf file"); + ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); + } + + #[test] + #[should_panic(expected = r#"validation failed: WritableSectionNotSupported(".bss")"#)] + fn test_bss_section() { + let elf_bytes = + std::fs::read("tests/elfs/bss_section.so").expect("failed to read elf file"); + ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); + } + + #[test] + #[should_panic(expected = "validation failed: InvalidProgramHeader")] + fn test_program_headers_overflow() { + let elf_bytes = std::fs::read("tests/elfs/program_headers_overflow.so") + .expect("failed to read elf file"); + ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); + } + + #[test] + #[should_panic(expected = "validation failed: RelativeJumpOutOfBounds(9)")] + fn test_relative_call_oob_backward() { + let mut elf_bytes = + std::fs::read("tests/elfs/relative_call.so").expect("failed to read elf file"); + LittleEndian::write_i32(&mut elf_bytes[0x104C..0x1050], -11i32); + ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); + } + + #[test] + #[should_panic(expected = "validation failed: RelativeJumpOutOfBounds(12)")] + fn test_relative_call_oob_forward() { + let mut elf_bytes = + std::fs::read("tests/elfs/relative_call.so").expect("failed to read elf file"); + LittleEndian::write_i32(&mut elf_bytes[0x1064..0x1068], 5); + ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); + } + + #[test] + fn test_long_section_name() { + let elf_bytes = std::fs::read("tests/elfs/long_section_name.so").unwrap(); + assert_error!( + NewParser::parse(&elf_bytes), + "FailedToParse(\"Section or symbol name `{}` is longer than `{}` bytes\")", + ".bss.__rust_no_alloc_shim_is_unstable" + .get(0..SECTION_NAME_LENGTH_MAXIMUM) + .unwrap(), + SECTION_NAME_LENGTH_MAXIMUM + ); + } +} diff --git a/rbpf/src/elf_parser/consts.rs b/rbpf/src/elf_parser/consts.rs new file mode 100644 index 00000000000000..2af2d6374d43f5 --- /dev/null +++ b/rbpf/src/elf_parser/consts.rs @@ -0,0 +1,168 @@ +#![allow(dead_code, missing_docs)] + +use super::types::*; + +pub const ELFMAG: [u8; 4] = [0x7F, 0x45, 0x4C, 0x46]; + +pub const ELFCLASSNONE: u8 = 0; +pub const ELFCLASS32: u8 = 1; +pub const ELFCLASS64: u8 = 2; + +pub const ELFDATANONE: u8 = 0; +pub const ELFDATA2LSB: u8 = 1; +pub const ELFDATA2MSB: u8 = 2; + +pub const EI_OSABI: u8 = 7; +pub const ELFOSABI_NONE: u8 = 0; + +pub const EM_BPF: Elf64Half = 247; +pub const EM_SBPF: Elf64Half = 263; + +pub const ET_NONE: Elf64Half = 0; +pub const ET_REL: Elf64Half = 1; +pub const ET_EXEC: Elf64Half = 2; +pub const ET_DYN: Elf64Half = 3; +pub const 
ET_CORE: Elf64Half = 4; + +pub const EV_NONE: Elf64Word = 0; +pub const EV_CURRENT: Elf64Word = 1; + +pub const PT_NULL: Elf64Word = 0; +pub const PT_LOAD: Elf64Word = 1; +pub const PT_DYNAMIC: Elf64Word = 2; +pub const PT_INTERP: Elf64Word = 3; +pub const PT_NOTE: Elf64Word = 4; +pub const PT_SHLIB: Elf64Word = 5; +pub const PT_PHDR: Elf64Word = 6; +pub const PT_TLS: Elf64Word = 7; +pub const PT_GNU_EH_FRAME: Elf64Word = 0x6474E550; +pub const PT_GNU_STACK: Elf64Word = 0x6474E551; + +pub const PF_X: Elf64Word = 0x1; +pub const PF_W: Elf64Word = 0x2; +pub const PF_R: Elf64Word = 0x4; + +pub const SHT_NULL: Elf64Word = 0; +pub const SHT_PROGBITS: Elf64Word = 1; +pub const SHT_SYMTAB: Elf64Word = 2; +pub const SHT_STRTAB: Elf64Word = 3; +pub const SHT_RELA: Elf64Word = 4; +pub const SHT_HASH: Elf64Word = 5; +pub const SHT_DYNAMIC: Elf64Word = 6; +pub const SHT_NOTE: Elf64Word = 7; +pub const SHT_NOBITS: Elf64Word = 8; +pub const SHT_REL: Elf64Word = 9; +pub const SHT_SHLIB: Elf64Word = 10; +pub const SHT_DYNSYM: Elf64Word = 11; +pub const SHT_INIT_ARRAY: Elf64Word = 14; +pub const SHT_FINI_ARRAY: Elf64Word = 15; +pub const SHT_PREINIT_ARRAY: Elf64Word = 16; +pub const SHT_GROUP: Elf64Word = 17; +pub const SHT_SYMTAB_SHNDX: Elf64Word = 18; + +pub const SHF_WRITE: Elf64Xword = 0x1; +pub const SHF_ALLOC: Elf64Xword = 0x2; +pub const SHF_EXECINSTR: Elf64Xword = 0x4; +pub const SHF_MERGE: Elf64Xword = 0x10; +pub const SHF_STRINGS: Elf64Xword = 0x20; +pub const SHF_INFO_LINK: Elf64Xword = 0x40; +pub const SHF_LINK_ORDER: Elf64Xword = 0x80; +pub const SHF_OS_NONCONFORMING: Elf64Xword = 0x100; +pub const SHF_GROUP: Elf64Xword = 0x200; +pub const SHF_TLS: Elf64Xword = 0x400; + +pub const SHN_UNDEF: Elf64Half = 0; + +pub const DT_NULL: Elf64Xword = 0; +pub const DT_NEEDED: Elf64Xword = 1; +pub const DT_PLTRELSZ: Elf64Xword = 2; +pub const DT_PLTGOT: Elf64Xword = 3; +pub const DT_HASH: Elf64Xword = 4; +pub const DT_STRTAB: Elf64Xword = 5; +pub const DT_SYMTAB: Elf64Xword = 6; +pub const DT_RELA: Elf64Xword = 7; +pub const DT_RELASZ: Elf64Xword = 8; +pub const DT_RELAENT: Elf64Xword = 9; +pub const DT_STRSZ: Elf64Xword = 10; +pub const DT_SYMENT: Elf64Xword = 11; +pub const DT_INIT: Elf64Xword = 12; +pub const DT_FINI: Elf64Xword = 13; +pub const DT_SONAME: Elf64Xword = 14; +pub const DT_RPATH: Elf64Xword = 15; +pub const DT_SYMBOLIC: Elf64Xword = 16; +pub const DT_REL: Elf64Xword = 17; +pub const DT_RELSZ: Elf64Xword = 18; +pub const DT_RELENT: Elf64Xword = 19; +pub const DT_PLTREL: Elf64Xword = 20; +pub const DT_DEBUG: Elf64Xword = 21; +pub const DT_TEXTREL: Elf64Xword = 22; +pub const DT_JMPREL: Elf64Xword = 23; +pub const DT_BIND_NOW: Elf64Xword = 24; +pub const DT_INIT_ARRAY: Elf64Xword = 25; +pub const DT_FINI_ARRAY: Elf64Xword = 26; +pub const DT_INIT_ARRAYSZ: Elf64Xword = 27; +pub const DT_FINI_ARRAYSZ: Elf64Xword = 28; +pub const DT_RUNPATH: Elf64Xword = 29; +pub const DT_FLAGS: Elf64Xword = 30; +pub const DT_ENCODING: Elf64Xword = 32; +pub const DT_PREINIT_ARRAY: Elf64Xword = 32; +pub const DT_PREINIT_ARRAYSZ: Elf64Xword = 33; +pub const DT_SYMTAB_SHNDX: Elf64Xword = 34; +pub const DT_NUM: usize = 35; + +pub const STT_NOTYPE: u8 = 0; +pub const STT_OBJECT: u8 = 1; +pub const STT_FUNC: u8 = 2; +pub const STT_SECTION: u8 = 3; +pub const STT_FILE: u8 = 4; +pub const STT_COMMON: u8 = 5; +pub const STT_TLS: u8 = 6; +pub const STT_NUM: u8 = 7; +pub const STT_LOOS: u8 = 10; +pub const STT_GNU_IFUNC: u8 = 10; +pub const STT_HIOS: u8 = 12; +pub const STT_LOPROC: u8 = 13; +pub const STT_HIPROC: u8 = 15; 
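// [Editorial sketch] How the STT_* values above are consumed: in the
// standard ELF64 encoding the symbol type occupies the low nibble of
// st_info and the binding the high nibble. Hypothetical helpers, not
// crate API.
pub fn st_type(st_info: u8) -> u8 {
    st_info & 0xF
}

pub fn st_bind(st_info: u8) -> u8 {
    st_info >> 4
}

#[test]
fn st_info_nibbles_sketch() {
    let st_info = (1 << 4) | STT_FUNC; // GLOBAL binding, FUNC type
    assert_eq!(st_type(st_info), STT_FUNC);
    assert_eq!(st_bind(st_info), 1);
}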
+ +pub const R_X86_64_NONE: u32 = 0; +pub const R_X86_64_64: u32 = 1; +pub const R_X86_64_PC32: u32 = 2; +pub const R_X86_64_GOT32: u32 = 3; +pub const R_X86_64_PLT32: u32 = 4; +pub const R_X86_64_COPY: u32 = 5; +pub const R_X86_64_GLOB_DAT: u32 = 6; +pub const R_X86_64_JUMP_SLOT: u32 = 7; +pub const R_X86_64_RELATIVE: u32 = 8; +pub const R_X86_64_GOTPCREL: u32 = 9; +pub const R_X86_64_32: u32 = 10; +pub const R_X86_64_32S: u32 = 11; +pub const R_X86_64_16: u32 = 12; +pub const R_X86_64_PC16: u32 = 13; +pub const R_X86_64_8: u32 = 14; +pub const R_X86_64_PC8: u32 = 15; +pub const R_X86_64_DTPMOD64: u32 = 16; +pub const R_X86_64_DTPOFF64: u32 = 17; +pub const R_X86_64_TPOFF64: u32 = 18; +pub const R_X86_64_TLSGD: u32 = 19; +pub const R_X86_64_TLSLD: u32 = 20; +pub const R_X86_64_DTPOFF32: u32 = 21; +pub const R_X86_64_GOTTPOFF: u32 = 22; +pub const R_X86_64_TPOFF32: u32 = 23; +pub const R_X86_64_PC64: u32 = 24; +pub const R_X86_64_GOTOFF64: u32 = 25; +pub const R_X86_64_GOTPC32: u32 = 26; +pub const R_X86_64_GOT64: u32 = 27; +pub const R_X86_64_GOTPCREL64: u32 = 28; +pub const R_X86_64_GOTPC64: u32 = 29; +pub const R_X86_64_GOTPLT64: u32 = 30; +pub const R_X86_64_PLTOFF64: u32 = 31; +pub const R_X86_64_SIZE32: u32 = 32; +pub const R_X86_64_SIZE64: u32 = 33; +pub const R_X86_64_GOTPC32_TLSDESC: u32 = 34; +pub const R_X86_64_TLSDESC_CALL: u32 = 35; +pub const R_X86_64_TLSDESC: u32 = 36; +pub const R_X86_64_IRELATIVE: u32 = 37; +pub const R_X86_64_RELATIVE64: u32 = 38; +pub const R_X86_64_GOTPCRELX: u32 = 41; +pub const R_X86_64_REX_GOTPCRELX: u32 = 42; +pub const R_X86_64_NUM: u32 = 43; diff --git a/rbpf/src/elf_parser/mod.rs b/rbpf/src/elf_parser/mod.rs new file mode 100644 index 00000000000000..bddcfeb4f74d97 --- /dev/null +++ b/rbpf/src/elf_parser/mod.rs @@ -0,0 +1,586 @@ +//! Dependency-less 64 bit ELF parser + +pub mod consts; +pub mod types; + +use std::{fmt, mem, ops::Range, slice}; + +use crate::{ArithmeticOverflow, ErrCheckedArithmetic}; +use {consts::*, types::*}; + +/// Maximum length of section name allowed. 
+pub const SECTION_NAME_LENGTH_MAXIMUM: usize = 16; +const SYMBOL_NAME_LENGTH_MAXIMUM: usize = 64; + +/// Error definitions +#[derive(Debug, PartialEq, Eq, thiserror::Error)] +pub enum ElfParserError { + /// ELF file header is inconsistent or unsupported + #[error("invalid file header")] + InvalidFileHeader, + /// Program header is inconsistent or unsupported + #[error("invalid program header")] + InvalidProgramHeader, + /// Section header is inconsistent or unsupported + #[error("invalid section header")] + InvalidSectionHeader, + /// Section or symbol name is not UTF8 or too long + #[error("invalid string")] + InvalidString, + /// Section or symbol name is too long + #[error("Section or symbol name `{0}` is longer than `{1}` bytes")] + StringTooLong(String, usize), + /// An index or memory range exceeds its boundaries + #[error("value out of bounds")] + OutOfBounds, + /// The size isn't valid + #[error("invalid size")] + InvalidSize, + /// Headers, tables or sections overlap in the file + #[error("values overlap")] + Overlap, + /// Sections are not sorted in ascending order + #[error("sections not in ascending order")] + SectionNotInOrder, + /// No section name string table present in the file + #[error("no section name string table found")] + NoSectionNameStringTable, + /// Invalid .dynamic section table + #[error("invalid dynamic section table")] + InvalidDynamicSectionTable, + /// Invalid relocation table + #[error("invalid relocation table")] + InvalidRelocationTable, + /// Invalid alignment + #[error("invalid alignment")] + InvalidAlignment, + /// No string table + #[error("no string table")] + NoStringTable, + /// No dynamic string table + #[error("no dynamic string table")] + NoDynamicStringTable, +} + +fn check_that_there_is_no_overlap( + range_a: &Range<usize>, + range_b: &Range<usize>, +) -> Result<(), ElfParserError> { + if range_a.end <= range_b.start || range_b.end <= range_a.start { + Ok(()) + } else { + Err(ElfParserError::Overlap) + } +} + +/// The parsed structure of an ELF file +pub struct Elf64<'a> { + elf_bytes: &'a [u8], + file_header: &'a Elf64Ehdr, + program_header_table: &'a [Elf64Phdr], + section_header_table: &'a [Elf64Shdr], + section_names_section_header: Option<&'a Elf64Shdr>, + symbol_section_header: Option<&'a Elf64Shdr>, + symbol_names_section_header: Option<&'a Elf64Shdr>, + dynamic_table: [Elf64Xword; DT_NUM], + dynamic_relocations_table: Option<&'a [Elf64Rel]>, + dynamic_symbol_table: Option<&'a [Elf64Sym]>, + dynamic_symbol_names_section_header: Option<&'a Elf64Shdr>, +} + +impl<'a> Elf64<'a> { + /// Parse from the given byte slice + pub fn parse(elf_bytes: &'a [u8]) -> Result<Self, ElfParserError> { + let file_header_range = 0..mem::size_of::<Elf64Ehdr>(); + let file_header_bytes = elf_bytes + .get(file_header_range.clone()) + .ok_or(ElfParserError::OutOfBounds)?; + let ptr = file_header_bytes.as_ptr(); + if (ptr as usize) + .checked_rem(mem::align_of::<Elf64Ehdr>()) + .map(|remaining| remaining != 0) + .unwrap_or(true) + { + return Err(ElfParserError::InvalidAlignment); + } + let file_header = unsafe { &*ptr.cast::<Elf64Ehdr>() }; + + if file_header.e_ident.ei_mag != ELFMAG + || file_header.e_ident.ei_class != ELFCLASS64 + || file_header.e_ident.ei_data != ELFDATA2LSB + || file_header.e_ident.ei_version != EV_CURRENT as u8 + || file_header.e_version != EV_CURRENT + || file_header.e_ehsize != mem::size_of::<Elf64Ehdr>() as u16 + || file_header.e_phentsize != mem::size_of::<Elf64Phdr>() as u16 + || file_header.e_shentsize != mem::size_of::<Elf64Shdr>() as u16 + || file_header.e_shstrndx >= file_header.e_shnum + { + return
+
+/// The parsed structure of an ELF file
+pub struct Elf64<'a> {
+    elf_bytes: &'a [u8],
+    file_header: &'a Elf64Ehdr,
+    program_header_table: &'a [Elf64Phdr],
+    section_header_table: &'a [Elf64Shdr],
+    section_names_section_header: Option<&'a Elf64Shdr>,
+    symbol_section_header: Option<&'a Elf64Shdr>,
+    symbol_names_section_header: Option<&'a Elf64Shdr>,
+    dynamic_table: [Elf64Xword; DT_NUM],
+    dynamic_relocations_table: Option<&'a [Elf64Rel]>,
+    dynamic_symbol_table: Option<&'a [Elf64Sym]>,
+    dynamic_symbol_names_section_header: Option<&'a Elf64Shdr>,
+}
+
+impl<'a> Elf64<'a> {
+    /// Parse from the given byte slice
+    pub fn parse(elf_bytes: &'a [u8]) -> Result<Self, ElfParserError> {
+        let file_header_range = 0..mem::size_of::<Elf64Ehdr>();
+        let file_header_bytes = elf_bytes
+            .get(file_header_range.clone())
+            .ok_or(ElfParserError::OutOfBounds)?;
+        let ptr = file_header_bytes.as_ptr();
+        if (ptr as usize)
+            .checked_rem(mem::align_of::<Elf64Ehdr>())
+            .map(|remaining| remaining != 0)
+            .unwrap_or(true)
+        {
+            return Err(ElfParserError::InvalidAlignment);
+        }
+        let file_header = unsafe { &*ptr.cast::<Elf64Ehdr>() };
+
+        if file_header.e_ident.ei_mag != ELFMAG
+            || file_header.e_ident.ei_class != ELFCLASS64
+            || file_header.e_ident.ei_data != ELFDATA2LSB
+            || file_header.e_ident.ei_version != EV_CURRENT as u8
+            || file_header.e_version != EV_CURRENT
+            || file_header.e_ehsize != mem::size_of::<Elf64Ehdr>() as u16
+            || file_header.e_phentsize != mem::size_of::<Elf64Phdr>() as u16
+            || file_header.e_shentsize != mem::size_of::<Elf64Shdr>() as u16
+            || file_header.e_shstrndx >= file_header.e_shnum
+        {
+            return Err(ElfParserError::InvalidFileHeader);
+        }
+
+        let program_header_table_range = file_header.e_phoff as usize
+            ..mem::size_of::<Elf64Phdr>()
+                .err_checked_mul(file_header.e_phnum as usize)?
+                .err_checked_add(file_header.e_phoff as usize)?;
+        check_that_there_is_no_overlap(&file_header_range, &program_header_table_range)?;
+        let program_header_table =
+            slice_from_bytes::<Elf64Phdr>(elf_bytes, program_header_table_range.clone())?;
+
+        let section_header_table_range = file_header.e_shoff as usize
+            ..mem::size_of::<Elf64Shdr>()
+                .err_checked_mul(file_header.e_shnum as usize)?
+                .err_checked_add(file_header.e_shoff as usize)?;
+        check_that_there_is_no_overlap(&file_header_range, &section_header_table_range)?;
+        check_that_there_is_no_overlap(&program_header_table_range, &section_header_table_range)?;
+        let section_header_table =
+            slice_from_bytes::<Elf64Shdr>(elf_bytes, section_header_table_range.clone())?;
+        section_header_table
+            .get(0)
+            .filter(|section_header| section_header.sh_type == SHT_NULL)
+            .ok_or(ElfParserError::InvalidSectionHeader)?;
+
+        let mut prev_program_header: Option<&Elf64Phdr> = None;
+        for program_header in program_header_table {
+            if program_header.p_type != PT_LOAD {
+                continue;
+            }
+
+            if let Some(prev_program_header) = prev_program_header {
+                // program headers must be ascending
+                if program_header.p_vaddr < prev_program_header.p_vaddr {
+                    return Err(ElfParserError::InvalidProgramHeader);
+                }
+            }
+
+            if program_header
+                .p_offset
+                .err_checked_add(program_header.p_filesz)? as usize
+                > elf_bytes.len()
+            {
+                return Err(ElfParserError::OutOfBounds);
+            }
+
+            prev_program_header = Some(program_header)
+        }
+
+        let mut offset = 0usize;
+        for section_header in section_header_table.iter() {
+            if section_header.sh_type == SHT_NOBITS {
+                continue;
+            }
+            let section_range = section_header.sh_offset as usize
+                ..(section_header.sh_offset as usize)
+                    .err_checked_add(section_header.sh_size as usize)?;
+            check_that_there_is_no_overlap(&section_range, &file_header_range)?;
+            check_that_there_is_no_overlap(&section_range, &program_header_table_range)?;
+            check_that_there_is_no_overlap(&section_range, &section_header_table_range)?;
+            if section_range.start < offset {
+                return Err(ElfParserError::SectionNotInOrder);
+            }
+            if section_range.end > elf_bytes.len() {
+                return Err(ElfParserError::OutOfBounds);
+            }
+            offset = section_range.end;
+        }
+
+        let section_names_section_header = (file_header.e_shstrndx != SHN_UNDEF)
+            .then(|| {
+                section_header_table
+                    .get(file_header.e_shstrndx as usize)
+                    .ok_or(ElfParserError::OutOfBounds)
+            })
+            .transpose()?;
+
+        let mut parser = Self {
+            elf_bytes,
+            file_header,
+            program_header_table,
+            section_header_table,
+            section_names_section_header,
+            symbol_section_header: None,
+            symbol_names_section_header: None,
+            dynamic_table: [0; DT_NUM],
+            dynamic_relocations_table: None,
+            dynamic_symbol_table: None,
+            dynamic_symbol_names_section_header: None,
+        };
+
+        parser.parse_sections()?;
+        parser.parse_dynamic()?;
+
+        Ok(parser)
+    }
+
+    /// Returns the file header.
+    pub fn file_header(&self) -> &Elf64Ehdr {
+        self.file_header
+    }
+
+    /// Returns the program header table.
+    pub fn program_header_table(&self) -> &[Elf64Phdr] {
+        self.program_header_table
+    }
+
+    /// Returns the section header table.
+    pub fn section_header_table(&self) -> &[Elf64Shdr] {
+        self.section_header_table
+    }
+
+    /// Returns the dynamic symbol table.
+    pub fn dynamic_symbol_table(&self) -> Option<&[Elf64Sym]> {
+        self.dynamic_symbol_table
+    }
+
+    /// Returns the dynamic relocations table.
+    pub fn dynamic_relocations_table(&self) -> Option<&[Elf64Rel]> {
+        self.dynamic_relocations_table
+    }
+
+    fn parse_sections(&mut self) -> Result<(), ElfParserError> {
+        macro_rules! section_header_by_name {
+            ($self:expr, $section_header:expr, $section_name:expr,
+                $($name:literal => $field:ident,)*) => {
+                match $section_name {
+                    $($name => {
+                        if $self.$field.is_some() {
+                            return Err(ElfParserError::InvalidSectionHeader);
+                        }
+                        $self.$field = Some($section_header);
+                    })*
+                    _ => {}
+                }
+            }
+        }
+        let section_names_section_header = self
+            .section_names_section_header
+            .ok_or(ElfParserError::NoSectionNameStringTable)?;
+        for section_header in self.section_header_table.iter() {
+            let section_name = self.get_string_in_section(
+                section_names_section_header,
+                section_header.sh_name,
+                SECTION_NAME_LENGTH_MAXIMUM,
+            )?;
+            section_header_by_name!(
+                self, section_header, section_name,
+                b".symtab" => symbol_section_header,
+                b".strtab" => symbol_names_section_header,
+                b".dynstr" => dynamic_symbol_names_section_header,
+            )
+        }
+
+        Ok(())
+    }
+
+    fn parse_dynamic(&mut self) -> Result<(), ElfParserError> {
+        let mut dynamic_table: Option<&[Elf64Dyn]> = None;
+
+        // try to parse PT_DYNAMIC
+        if let Some(dynamic_program_header) = self
+            .program_header_table
+            .iter()
+            .find(|program_header| program_header.p_type == PT_DYNAMIC)
+        {
+            dynamic_table = self.slice_from_program_header(dynamic_program_header).ok();
+        }
+
+        // if PT_DYNAMIC does not exist or is invalid (some of our tests have this),
+        // fall back to parsing SHT_DYNAMIC
+        if dynamic_table.is_none() {
+            if let Some(dynamic_section_header) = self
+                .section_header_table
+                .iter()
+                .find(|section_header| section_header.sh_type == SHT_DYNAMIC)
+            {
+                dynamic_table = Some(
+                    self.slice_from_section_header(dynamic_section_header)
+                        .map_err(|_| ElfParserError::InvalidDynamicSectionTable)?,
+                );
+            }
+        }
+
+        // if there is neither PT_DYNAMIC nor SHT_DYNAMIC, this is a static
+        // file
+        let dynamic_table = match dynamic_table {
+            Some(table) => table,
+            None => return Ok(()),
+        };
+
+        // expand Elf64Dyn entries into self.dynamic_table
+        for dyn_info in dynamic_table {
+            if dyn_info.d_tag == DT_NULL {
+                break;
+            }
+
+            if dyn_info.d_tag as usize >= DT_NUM {
+                // we don't parse any reserved tags
+                continue;
+            }
+            self.dynamic_table[dyn_info.d_tag as usize] = dyn_info.d_val;
+        }
+
+        self.dynamic_relocations_table = self.parse_dynamic_relocations()?;
+        self.dynamic_symbol_table = self.parse_dynamic_symbol_table()?;
+
+        Ok(())
+    }
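+    // Illustrative example (not part of the parser): a .dynamic region holding
+    // the entries (DT_REL, 0x1000), (DT_RELSZ, 0x30), (DT_RELENT, 0x10),
+    // (DT_NULL, 0) is expanded above into
+    //
+    //     dynamic_table[DT_REL as usize]    == 0x1000
+    //     dynamic_table[DT_RELSZ as usize]  == 0x30
+    //     dynamic_table[DT_RELENT as usize] == 0x10
+    //
+    // with every other slot left at zero, which is exactly the shape
+    // parse_dynamic_relocations() below expects.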
+
+    fn parse_dynamic_relocations(&mut self) -> Result<Option<&'a [Elf64Rel]>, ElfParserError> {
+        let vaddr = self.dynamic_table[DT_REL as usize];
+        if vaddr == 0 {
+            return Ok(None);
+        }
+
+        if self.dynamic_table[DT_RELENT as usize] as usize != mem::size_of::<Elf64Rel>() {
+            return Err(ElfParserError::InvalidDynamicSectionTable);
+        }
+
+        let size = self.dynamic_table[DT_RELSZ as usize] as usize;
+        if size == 0 {
+            return Err(ElfParserError::InvalidDynamicSectionTable);
+        }
+
+        let offset = if let Some(program_header) = self.program_header_for_vaddr(vaddr)? {
+            vaddr
+                .err_checked_sub(program_header.p_vaddr)?
+                .err_checked_add(program_header.p_offset)?
+        } else {
+            // At least until rust-bpf-sysroot v0.13, we used to generate
+            // invalid dynamic sections where the address of DT_REL was not
+            // contained in any program segment. When loading one of those
+            // files, fall back to relying on section headers.
+            self.section_header_table
+                .iter()
+                .find(|section_header| section_header.sh_addr == vaddr)
+                .ok_or(ElfParserError::InvalidDynamicSectionTable)?
+                .sh_offset
+        } as usize;
+
+        self.slice_from_bytes(offset..offset.err_checked_add(size)?)
+            .map(Some)
+            .map_err(|_| ElfParserError::InvalidDynamicSectionTable)
+    }
+
+    fn parse_dynamic_symbol_table(&mut self) -> Result<Option<&'a [Elf64Sym]>, ElfParserError> {
+        let vaddr = self.dynamic_table[DT_SYMTAB as usize];
+        if vaddr == 0 {
+            return Ok(None);
+        }
+
+        let dynsym_section_header = self
+            .section_header_table
+            .iter()
+            .find(|section_header| section_header.sh_addr == vaddr)
+            .ok_or(ElfParserError::InvalidDynamicSectionTable)?;
+
+        self.get_symbol_table_of_section(dynsym_section_header)
+            .map(Some)
+    }
+
+    /// Query a single string from a section which is marked as SHT_STRTAB
+    pub fn get_string_in_section(
+        &self,
+        section_header: &Elf64Shdr,
+        offset_in_section: Elf64Word,
+        maximum_length: usize,
+    ) -> Result<&'a [u8], ElfParserError> {
+        if section_header.sh_type != SHT_STRTAB {
+            return Err(ElfParserError::InvalidSectionHeader);
+        }
+        let offset_in_file =
+            (section_header.sh_offset as usize).err_checked_add(offset_in_section as usize)?;
+        let string_range = offset_in_file
+            ..(section_header.sh_offset as usize)
+                .err_checked_add(section_header.sh_size as usize)?
+                .min(offset_in_file.err_checked_add(maximum_length)?);
+        let unterminated_string_bytes = self
+            .elf_bytes
+            .get(string_range)
+            .ok_or(ElfParserError::OutOfBounds)?;
+        unterminated_string_bytes
+            .iter()
+            .position(|byte| *byte == 0x00)
+            .and_then(|string_length| unterminated_string_bytes.get(0..string_length))
+            .ok_or_else(|| {
+                ElfParserError::StringTooLong(
+                    String::from_utf8_lossy(unterminated_string_bytes).to_string(),
+                    maximum_length,
+                )
+            })
+    }
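+    // For example (illustrative only): if a SHT_STRTAB section's bytes are
+    // b".text\0.data\0", then get_string_in_section(header, 0, 16) yields
+    // b".text" and get_string_in_section(header, 6, 16) yields b".data",
+    // while an offset whose string is not NUL-terminated within the length
+    // cap is reported as ElfParserError::StringTooLong.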
+
+    /// Returns the string corresponding to the given `sh_name`
+    pub fn section_name(&self, sh_name: Elf64Word) -> Result<&'a [u8], ElfParserError> {
+        self.get_string_in_section(
+            self.section_names_section_header
+                .ok_or(ElfParserError::NoSectionNameStringTable)?,
+            sh_name,
+            SECTION_NAME_LENGTH_MAXIMUM,
+        )
+    }
+
+    /// Returns the name of the `st_name` symbol
+    pub fn symbol_name(&self, st_name: Elf64Word) -> Result<&'a [u8], ElfParserError> {
+        self.get_string_in_section(
+            self.symbol_names_section_header
+                .ok_or(ElfParserError::NoStringTable)?,
+            st_name,
+            SYMBOL_NAME_LENGTH_MAXIMUM,
+        )
+    }
+
+    /// Returns the symbol table
+    pub fn symbol_table(&self) -> Result<Option<&'a [Elf64Sym]>, ElfParserError> {
+        self.symbol_section_header
+            .map(|section_header| self.get_symbol_table_of_section(section_header))
+            .transpose()
+    }
+
+    /// Returns the name of the `st_name` dynamic symbol
+    pub fn dynamic_symbol_name(&self, st_name: Elf64Word) -> Result<&'a [u8], ElfParserError> {
+        self.get_string_in_section(
+            self.dynamic_symbol_names_section_header
+                .ok_or(ElfParserError::NoDynamicStringTable)?,
+            st_name,
+            SYMBOL_NAME_LENGTH_MAXIMUM,
+        )
+    }
+
+    /// Returns the symbol table of a section which is marked as SHT_SYMTAB
+    pub fn get_symbol_table_of_section(
+        &self,
+        section_header: &Elf64Shdr,
+    ) -> Result<&'a [Elf64Sym], ElfParserError> {
+        if section_header.sh_type != SHT_SYMTAB && section_header.sh_type != SHT_DYNSYM {
+            return Err(ElfParserError::InvalidSectionHeader);
+        }
+
+        self.slice_from_section_header(section_header)
+    }
+
+    /// Returns the `&[T]` contained in the data described by the given program
+    /// header
+    pub fn slice_from_program_header<T: 'static>(
+        &self,
+        &Elf64Phdr {
+            p_offset, p_filesz, ..
+        }: &Elf64Phdr,
+    ) -> Result<&'a [T], ElfParserError> {
+        self.slice_from_bytes(
+            (p_offset as usize)..(p_offset as usize).err_checked_add(p_filesz as usize)?,
+        )
+    }
+
+    /// Returns the `&[T]` contained in the section data described by the given
+    /// section header
+    pub fn slice_from_section_header<T: 'static>(
+        &self,
+        &Elf64Shdr {
+            sh_offset, sh_size, ..
+        }: &Elf64Shdr,
+    ) -> Result<&'a [T], ElfParserError> {
+        self.slice_from_bytes(
+            (sh_offset as usize)..(sh_offset as usize).err_checked_add(sh_size as usize)?,
+        )
+    }
+
+    /// Returns the `&[T]` contained at `elf_bytes[offset..size]`
+    fn slice_from_bytes<T: 'static>(&self, range: Range<usize>) -> Result<&'a [T], ElfParserError> {
+        slice_from_bytes(self.elf_bytes, range)
+    }
+
+    fn program_header_for_vaddr(
+        &self,
+        vaddr: Elf64Addr,
+    ) -> Result<Option<&'a Elf64Phdr>, ElfParserError> {
+        for program_header in self.program_header_table.iter() {
+            let Elf64Phdr {
+                p_vaddr, p_memsz, ..
+            } = program_header;
+
+            if (*p_vaddr..p_vaddr.err_checked_add(*p_memsz)?).contains(&vaddr) {
+                return Ok(Some(program_header));
+            }
+        }
+        Ok(None)
+    }
+}
+
+impl<'a> fmt::Debug for Elf64<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        writeln!(f, "{:#X?}", self.file_header)?;
+        for program_header in self.program_header_table.iter() {
+            writeln!(f, "{program_header:#X?}")?;
+        }
+        for section_header in self.section_header_table.iter() {
+            let section_name = self
+                .get_string_in_section(
+                    self.section_names_section_header.unwrap(),
+                    section_header.sh_name,
+                    SECTION_NAME_LENGTH_MAXIMUM,
+                )
+                .and_then(|name| {
+                    std::str::from_utf8(name).map_err(|_| ElfParserError::InvalidString)
+                })
+                .unwrap();
+            writeln!(f, "{section_name}")?;
+            writeln!(f, "{section_header:#X?}")?;
+        }
+        if let Some(section_header) = self.symbol_section_header {
+            let symbol_table = self.get_symbol_table_of_section(section_header).unwrap();
+            writeln!(f, "{symbol_table:#X?}")?;
+            for symbol in symbol_table.iter() {
+                if symbol.st_name != 0 {
+                    let symbol_name = self
+                        .get_string_in_section(
+                            self.symbol_names_section_header.unwrap(),
+                            symbol.st_name,
+                            SYMBOL_NAME_LENGTH_MAXIMUM,
+                        )
+                        .and_then(|name| {
+                            std::str::from_utf8(name).map_err(|_| ElfParserError::InvalidString)
+                        })
+                        .unwrap();
+                    writeln!(f, "{symbol_name}")?;
+                }
+            }
+        }
+        Ok(())
+    }
+}
+
+fn slice_from_bytes<T: 'static>(bytes: &[u8], range: Range<usize>) -> Result<&[T], ElfParserError> {
+    if range
+        .len()
+        .checked_rem(mem::size_of::<T>())
+        .map(|remainder| remainder != 0)
+        .unwrap_or(true)
+    {
+        return Err(ElfParserError::InvalidSize);
+    }
+
+    let bytes = bytes
+        .get(range.clone())
+        .ok_or(ElfParserError::OutOfBounds)?;
+
+    let ptr = bytes.as_ptr();
+    if (ptr as usize)
+        .checked_rem(mem::align_of::<T>())
+        .map(|remaining| remaining != 0)
+        .unwrap_or(true)
+    {
+        return Err(ElfParserError::InvalidAlignment);
+    }
+
+    Ok(unsafe {
+        slice::from_raw_parts(
+            ptr.cast(),
+            range.len().checked_div(mem::size_of::<T>()).unwrap_or(0),
+        )
+    })
+}
+
+impl From<ArithmeticOverflow> for ElfParserError {
+    fn from(_: ArithmeticOverflow) -> ElfParserError {
+        ElfParserError::OutOfBounds
+    }
+}
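+
+// Usage sketch (illustrative only; "program.so" is a placeholder and is
+// assumed to be a complete 64-bit little-endian ELF file):
+//
+//     let bytes = std::fs::read("program.so").unwrap();
+//     let elf = Elf64::parse(&bytes)?;
+//     for section_header in elf.section_header_table() {
+//         let name = elf.section_name(section_header.sh_name)?;
+//         println!("{}", String::from_utf8_lossy(name));
+//     }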
diff --git a/rbpf/src/elf_parser/types.rs b/rbpf/src/elf_parser/types.rs
new file mode 100644
index 00000000000000..95378f61e9b567
--- /dev/null
+++ b/rbpf/src/elf_parser/types.rs
@@ -0,0 +1,92 @@
+#![allow(missing_docs)]
+
+pub type Elf64Half = u16;
+pub type Elf64Word = u32;
+pub type Elf64Xword = u64;
+pub type Elf64Addr = u64;
+pub type Elf64Off = u64;
+pub type Elf64Section = u16;
+
+#[derive(Debug, Clone)]
+#[repr(C)]
+pub struct ElfIdent {
+    pub ei_mag: [u8; 4],
+    pub ei_class: u8,
+    pub ei_data: u8,
+    pub ei_version: u8,
+    pub ei_osabi: u8,
+    pub ei_abiversion: u8,
+    pub ei_pad: [u8; 7],
+}
+
+#[derive(Debug, Clone)]
+#[repr(C)]
+pub struct Elf64Ehdr {
+    pub e_ident: ElfIdent,
+    pub e_type: Elf64Half,
+    pub e_machine: Elf64Half,
+    pub e_version: Elf64Word,
+    pub e_entry: Elf64Addr,
+    pub e_phoff: Elf64Off,
+    pub e_shoff: Elf64Off,
+    pub e_flags: Elf64Word,
+    pub e_ehsize: Elf64Half,
+    pub e_phentsize: Elf64Half,
+    pub e_phnum: Elf64Half,
+    pub e_shentsize: Elf64Half,
+    pub e_shnum: Elf64Half,
+    pub e_shstrndx: Elf64Half,
+}
+
+#[derive(Debug, Clone)]
+#[repr(C)]
+pub struct Elf64Phdr {
+    pub p_type: Elf64Word,
+    pub p_flags: Elf64Word,
+    pub p_offset: Elf64Off,
+    pub p_vaddr: Elf64Addr,
+    pub p_paddr: Elf64Addr,
+    pub p_filesz: Elf64Xword,
+    pub p_memsz: Elf64Xword,
+    pub p_align: Elf64Xword,
+}
+
+#[derive(Debug, Clone)]
+#[repr(C)]
+pub struct Elf64Shdr {
+    pub sh_name: Elf64Word,
+    pub sh_type: Elf64Word,
+    pub sh_flags: Elf64Xword,
+    pub sh_addr: Elf64Addr,
+    pub sh_offset: Elf64Off,
+    pub sh_size: Elf64Xword,
+    pub sh_link: Elf64Word,
+    pub sh_info: Elf64Word,
+    pub sh_addralign: Elf64Xword,
+    pub sh_entsize: Elf64Xword,
+}
+
+#[derive(Debug, Clone)]
+#[repr(C)]
+pub struct Elf64Sym {
+    pub st_name: Elf64Word,
+    pub st_info: u8,
+    pub st_other: u8,
+    pub st_shndx: Elf64Section,
+    pub st_value: Elf64Addr,
+    pub st_size: Elf64Xword,
+}
+
+#[derive(Debug, Clone)]
+#[repr(C)]
+pub struct Elf64Dyn {
+    pub d_tag: Elf64Xword,
+    pub d_val: Elf64Xword,
+}
+
+#[derive(Debug, Clone)]
+#[repr(C)]
+pub struct Elf64Rel {
+    pub r_offset: Elf64Addr,
+    pub r_info: Elf64Xword,
+}
diff --git a/rbpf/src/elf_parser_glue.rs b/rbpf/src/elf_parser_glue.rs
new file mode 100644
index 00000000000000..e04fe736a00d95
--- /dev/null
+++ b/rbpf/src/elf_parser_glue.rs
@@ -0,0 +1,566 @@
+//! Internal ELF parser abstraction.
+use std::{borrow::Cow, convert::TryInto, iter, ops::Range, slice};
+
+use goblin::{
+    elf::{Elf, Header, ProgramHeader, Reloc, SectionHeader, Sym},
+    elf64::{
+        header::{EI_ABIVERSION, EI_CLASS, EI_DATA, EI_OSABI, EI_VERSION},
+        reloc::RelocIterator,
+        sym::SymIterator,
+    },
+    error::Error as GoblinError,
+};
+
+use crate::{
+    elf::ElfError,
+    elf_parser::{
+        consts::{SHF_ALLOC, SHF_WRITE, SHT_NOBITS, STT_FUNC},
+        types::{
+            Elf64Addr, Elf64Ehdr, Elf64Off, Elf64Phdr, Elf64Rel, Elf64Shdr, Elf64Sym, Elf64Word,
+            Elf64Xword, ElfIdent,
+        },
+        Elf64, ElfParserError,
+    },
+    error::EbpfError,
+};
+
+/// The common trait implemented by GoblinParser and NewParser.
+///
+/// This is an internal interface used to isolate the ELF parsing bits and to be
+/// able to plug in either the old goblin parser or the new parser, depending on
+/// config.
+///
+/// The interface is pretty straightforward. The associated types are the types
+/// used to represent ELF data. Some return values are `Cow` since goblin
+/// returns some data by value, while the new parser always borrows from the
+/// underlying file slice.
+pub trait ElfParser<'a>: Sized {
+    /// Program header type.
+    type ProgramHeader: ElfProgramHeader + 'a;
+    /// Iterator of program headers.
+    type ProgramHeaders: Iterator<Item = &'a Self::ProgramHeader>;
+
+    /// Section header type.
+    type SectionHeader: ElfSectionHeader + 'a;
+    /// Iterator of section headers.
+    type SectionHeaders: Iterator<Item = &'a Self::SectionHeader>;
+
+    /// Symbol type.
+    type Symbol: ElfSymbol + 'a;
+    /// Iterator of symbols.
+    type Symbols: Iterator<Item = Cow<'a, Self::Symbol>>;
+
+    /// Relocation type.
+    type Relocation: ElfRelocation + 'a;
+    /// Iterator of relocations.
+    type Relocations: Iterator<Item = Cow<'a, Self::Relocation>>;
+
+    /// Parses the ELF data included in the buffer.
+    fn parse(data: &'a [u8]) -> Result<Self, ElfError>;
+
+    /// Returns the file header.
+    fn header(&self) -> &Elf64Ehdr;
+
+    /// Returns the program headers.
+    fn program_headers(&'a self) -> Self::ProgramHeaders;
+
+    /// Returns the section headers.
+    fn section_headers(&'a self) -> Self::SectionHeaders;
+
+    /// Returns the section with the given `name`.
+    fn section(&self, name: &[u8]) -> Result<Self::SectionHeader, ElfError>;
+
+    /// Returns the section name at the given `sh_name` offset.
+    fn section_name(&self, sh_name: Elf64Word) -> Option<&[u8]>;
+
+    /// Returns the symbols included in the symbol table.
+    fn symbols(&'a self) -> Self::Symbols;
+
+    /// Returns the symbol name at the given `st_name` offset.
+    fn symbol_name(&self, st_name: Elf64Word) -> Option<&[u8]>;
+
+    /// Returns the symbol at the given `index` of the dynamic symbol table.
+    fn dynamic_symbol(&self, index: Elf64Word) -> Option<Self::Symbol>;
+
+    /// Returns the dynamic symbol name at the given `st_name` offset.
+    fn dynamic_symbol_name(&self, st_name: Elf64Word) -> Option<&[u8]>;
+
+    /// Returns the dynamic relocations.
+    fn dynamic_relocations(&'a self) -> Self::Relocations;
+}
+
+/// ELF program header.
+pub trait ElfProgramHeader {
+    /// Returns the segment virtual address.
+    fn p_vaddr(&self) -> Elf64Addr;
+
+    /// Returns the segment size when loaded in memory.
+    fn p_memsz(&self) -> Elf64Xword;
+
+    /// Returns the segment file offset.
+    fn p_offset(&self) -> Elf64Off;
+
+    /// Returns the segment virtual address range.
+    fn vm_range(&self) -> Range<Elf64Addr> {
+        let addr = self.p_vaddr();
+        addr..addr.saturating_add(self.p_memsz())
+    }
+}
+
+/// ELF section header.
+pub trait ElfSectionHeader {
+    /// Returns the section name offset.
+    fn sh_name(&self) -> Elf64Word;
+
+    /// Returns the section virtual address.
+    fn sh_addr(&self) -> Elf64Addr;
+
+    /// Returns the section file offset.
+    fn sh_offset(&self) -> Elf64Off;
+
+    /// Returns the section size.
+    fn sh_size(&self) -> Elf64Xword;
+
+    /// Returns the section flags.
+    fn sh_flags(&self) -> Elf64Xword;
+
+    /// Returns the section type.
+    fn sh_type(&self) -> Elf64Word;
+
+    /// Returns whether the section is writable.
+    fn is_writable(&self) -> bool {
+        self.sh_flags() & (SHF_ALLOC | SHF_WRITE) == SHF_ALLOC | SHF_WRITE
+    }
+
+    /// Returns the byte range the section spans in the file.
+    fn file_range(&self) -> Option<Range<usize>> {
+        (self.sh_type() != SHT_NOBITS).then(|| {
+            let offset = self.sh_offset() as usize;
+            offset..offset.saturating_add(self.sh_size() as usize)
+        })
+    }
+
+    /// Returns the virtual address range.
+    fn vm_range(&self) -> Range<Elf64Addr> {
+        let addr = self.sh_addr();
+        addr..addr.saturating_add(self.sh_size())
+    }
+}
+
+/// ELF symbol.
+pub trait ElfSymbol: Clone {
+    /// Returns the symbol name offset.
+    fn st_name(&self) -> Elf64Word;
+
+    /// Returns the symbol type and binding attributes.
+    fn st_info(&self) -> u8;
+
+    /// Returns the value associated with the symbol.
+    fn st_value(&self) -> Elf64Addr;
+
+    /// Returns whether the symbol is a function.
+    fn is_function(&self) -> bool {
+        (self.st_info() & 0xF) == STT_FUNC
+    }
+}
+
+/// ELF relocation.
+pub trait ElfRelocation: Clone {
+    /// Returns the offset where to apply the relocation.
+    fn r_offset(&self) -> Elf64Addr;
+
+    /// Returns the relocation type.
+    fn r_type(&self) -> Elf64Word;
+
+    /// Returns the symbol index.
+    fn r_sym(&self) -> Elf64Word;
+}
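+
+// A minimal sketch of how this abstraction is meant to be consumed
+// (illustrative; `count_functions` is a made-up helper, not part of the
+// crate): the same code works with either parser backend.
+//
+//     fn count_functions<'a, P: ElfParser<'a>>(parser: &'a P) -> usize {
+//         parser.symbols().filter(|symbol| symbol.is_function()).count()
+//     }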
+
+/// The Goblin based ELF parser.
+pub struct GoblinParser<'a> {
+    elf: Elf<'a>,
+    header: Elf64Ehdr,
+}
+
+impl<'a> ElfParser<'a> for GoblinParser<'a> {
+    type ProgramHeader = ProgramHeader;
+    type ProgramHeaders = slice::Iter<'a, ProgramHeader>;
+
+    type SectionHeader = SectionHeader;
+    type SectionHeaders = slice::Iter<'a, SectionHeader>;
+
+    type Symbol = Sym;
+    type Symbols = iter::Map<SymIterator<'a>, fn(Self::Symbol) -> Cow<'a, Self::Symbol>>;
+
+    type Relocation = Reloc;
+    type Relocations =
+        iter::Map<RelocIterator<'a>, fn(Self::Relocation) -> Cow<'a, Self::Relocation>>;
+
+    fn parse(data: &'a [u8]) -> Result<GoblinParser<'a>, ElfError> {
+        let elf = Elf::parse(data)?;
+        Ok(Self {
+            header: elf.header.into(),
+            elf,
+        })
+    }
+
+    fn header(&self) -> &Elf64Ehdr {
+        &self.header
+    }
+
+    fn program_headers(&'a self) -> Self::ProgramHeaders {
+        self.elf.program_headers.iter()
+    }
+
+    fn section_headers(&'a self) -> Self::SectionHeaders {
+        self.elf.section_headers.iter()
+    }
+
+    fn section(&self, name: &[u8]) -> Result<Self::SectionHeader, ElfError> {
+        match self.elf.section_headers.iter().find(|section_header| {
+            if let Some(this_name) = self.section_name(section_header.sh_name as Elf64Word) {
+                return this_name == name;
+            }
+            false
+        }) {
+            Some(section) => Ok(section.clone()),
+            None => Err(ElfError::SectionNotFound(
+                std::str::from_utf8(name)
+                    .unwrap_or("UTF-8 error")
+                    .to_string(),
+            )),
+        }
+    }
+
+    fn section_name(&self, sh_name: Elf64Word) -> Option<&[u8]> {
+        self.elf
+            .shdr_strtab
+            .get_at(sh_name as usize)
+            .map(|name| name.as_bytes())
+    }
+
+    fn symbols(&'a self) -> Self::Symbols {
+        self.elf.syms.iter().map(Cow::Owned)
+    }
+
+    fn symbol_name(&self, st_name: Elf64Word) -> Option<&[u8]> {
+        self.elf
+            .strtab
+            .get_at(st_name as usize)
+            .map(|name| name.as_bytes())
+    }
+
+    fn dynamic_symbol(&self, index: Elf64Word) -> Option<Self::Symbol> {
+        self.elf.dynsyms.get(index as usize)
+    }
+
+    fn dynamic_symbol_name(&self, st_name: Elf64Word) -> Option<&[u8]> {
+        self.elf
+            .dynstrtab
+            .get_at(st_name as usize)
+            .map(|name| name.as_bytes())
+    }
+
+    fn dynamic_relocations(&self) -> Self::Relocations {
+        self.elf.dynrels.iter().map(Cow::Owned)
+    }
+}
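+
+// Design note: goblin's SymIterator and RelocIterator yield values rather than
+// references, so this backend wraps them in Cow::Owned; NewParser below borrows
+// directly from the underlying file slice and uses Cow::Borrowed instead.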
+
+impl From<Header> for Elf64Ehdr {
+    fn from(h: Header) -> Self {
+        Elf64Ehdr {
+            e_ident: ElfIdent {
+                ei_mag: h.e_ident[0..4].try_into().unwrap(),
+                ei_class: h.e_ident[EI_CLASS],
+                ei_data: h.e_ident[EI_DATA],
+                ei_version: h.e_ident[EI_VERSION],
+                ei_osabi: h.e_ident[EI_OSABI],
+                ei_abiversion: h.e_ident[EI_ABIVERSION],
+                ei_pad: [0u8; 7],
+            },
+            e_type: h.e_type,
+            e_machine: h.e_machine,
+            e_version: h.e_version,
+            e_entry: h.e_entry,
+            e_phoff: h.e_phoff,
+            e_shoff: h.e_shoff,
+            e_flags: h.e_flags,
+            e_ehsize: h.e_ehsize,
+            e_phentsize: h.e_phentsize,
+            e_phnum: h.e_phnum,
+            e_shentsize: h.e_shentsize,
+            e_shnum: h.e_shnum,
+            e_shstrndx: h.e_shstrndx,
+        }
+    }
+}
+
+impl ElfProgramHeader for ProgramHeader {
+    fn p_vaddr(&self) -> Elf64Addr {
+        self.p_vaddr
+    }
+
+    fn p_memsz(&self) -> Elf64Xword {
+        self.p_memsz
+    }
+
+    fn p_offset(&self) -> Elf64Off {
+        self.p_offset
+    }
+}
+
+impl ElfSectionHeader for SectionHeader {
+    fn sh_name(&self) -> Elf64Word {
+        self.sh_name as _
+    }
+
+    fn sh_flags(&self) -> Elf64Xword {
+        self.sh_flags
+    }
+
+    fn sh_addr(&self) -> Elf64Addr {
+        self.sh_addr
+    }
+
+    fn sh_offset(&self) -> Elf64Off {
+        self.sh_offset
+    }
+
+    fn sh_size(&self) -> Elf64Xword {
+        self.sh_size
+    }
+
+    fn sh_type(&self) -> Elf64Word {
+        self.sh_type
+    }
+}
+
+impl ElfSymbol for Sym {
+    fn st_name(&self) -> Elf64Word {
+        self.st_name as _
+    }
+
+    fn st_info(&self) -> u8 {
+        self.st_info
+    }
+
+    fn st_value(&self) -> Elf64Addr {
+        self.st_value
+    }
+}
+
+impl ElfRelocation for Reloc {
+    fn r_offset(&self) -> Elf64Addr {
+        self.r_offset
+    }
+
+    fn r_type(&self) -> Elf64Word {
+        self.r_type
+    }
+
+    fn r_sym(&self) -> Elf64Word {
+        self.r_sym as Elf64Word
+    }
+}
+
+/// The new ELF parser.
+#[derive(Debug)]
+pub struct NewParser<'a> {
+    elf: Elf64<'a>,
+}
+
+impl<'a> ElfParser<'a> for NewParser<'a> {
+    type ProgramHeader = Elf64Phdr;
+    type ProgramHeaders = slice::Iter<'a, Self::ProgramHeader>;
+
+    type SectionHeader = Elf64Shdr;
+    type SectionHeaders = slice::Iter<'a, Self::SectionHeader>;
+
+    type Symbol = Elf64Sym;
+    type Symbols =
+        iter::Map<slice::Iter<'a, Self::Symbol>, fn(&'a Self::Symbol) -> Cow<'a, Self::Symbol>>;
+
+    type Relocation = Elf64Rel;
+    type Relocations = iter::Map<
+        slice::Iter<'a, Self::Relocation>,
+        fn(&'a Self::Relocation) -> Cow<'a, Self::Relocation>,
+    >;
+
+    fn parse(data: &'a [u8]) -> Result<NewParser<'a>, ElfError> {
+        Ok(Self {
+            elf: Elf64::parse(data)?,
+        })
+    }
+
+    fn header(&self) -> &Elf64Ehdr {
+        self.elf.file_header()
+    }
+
+    fn program_headers(&'a self) -> Self::ProgramHeaders {
+        self.elf.program_header_table().iter()
+    }
+
+    fn section_headers(&'a self) -> Self::SectionHeaders {
+        self.elf.section_header_table().iter()
+    }
+
+    fn section(&self, name: &[u8]) -> Result<Self::SectionHeader, ElfError> {
+        for section_header in self.elf.section_header_table() {
+            if self.elf.section_name(section_header.sh_name)? == name {
+                return Ok(section_header.clone());
+            }
+        }
+
+        Err(ElfError::SectionNotFound(
+            std::str::from_utf8(name)
+                .unwrap_or("UTF-8 error")
+                .to_string(),
+        ))
+    }
+
+    fn section_name(&self, sh_name: Elf64Word) -> Option<&[u8]> {
+        self.elf.section_name(sh_name).ok()
+    }
+
+    fn symbols(&'a self) -> Self::Symbols {
+        self.elf
+            .symbol_table()
+            .ok()
+            .flatten()
+            .unwrap_or(&[])
+            .iter()
+            .map(Cow::Borrowed)
+    }
+
+    fn symbol_name(&self, st_name: Elf64Word) -> Option<&[u8]> {
+        self.elf.symbol_name(st_name).ok()
+    }
+
+    fn dynamic_symbol(&self, index: Elf64Word) -> Option<Self::Symbol> {
+        self.elf
+            .dynamic_symbol_table()
+            .and_then(|table| table.get(index as usize).cloned())
+    }
+
+    fn dynamic_symbol_name(&self, st_name: Elf64Word) -> Option<&[u8]> {
+        self.elf.dynamic_symbol_name(st_name).ok()
+    }
+
+    fn dynamic_relocations(&'a self) -> Self::Relocations {
+        self.elf
+            .dynamic_relocations_table()
+            .unwrap_or(&[])
+            .iter()
+            .map(Cow::Borrowed)
+    }
+}
+
+impl ElfProgramHeader for Elf64Phdr {
+    fn p_vaddr(&self) -> Elf64Addr {
+        self.p_vaddr
+    }
+
+    fn p_memsz(&self) -> Elf64Xword {
+        self.p_memsz
+    }
+
+    fn p_offset(&self) -> Elf64Off {
+        self.p_offset
+    }
+}
+
+impl ElfSectionHeader for Elf64Shdr {
+    fn sh_name(&self) -> Elf64Word {
+        self.sh_name as _
+    }
+
+    fn sh_flags(&self) -> Elf64Xword {
+        self.sh_flags
+    }
+
+    fn sh_addr(&self) -> Elf64Addr {
+        self.sh_addr
+    }
+
+    fn sh_offset(&self) -> Elf64Off {
+        self.sh_offset
+    }
+
+    fn sh_size(&self) -> Elf64Xword {
+        self.sh_size
+    }
+
+    fn sh_type(&self) -> Elf64Word {
+        self.sh_type
+    }
+}
+
+impl ElfSymbol for Elf64Sym {
+    fn st_name(&self) -> Elf64Word {
+        self.st_name
+    }
+
+    fn st_info(&self) -> u8 {
+        self.st_info
+    }
+
+    fn st_value(&self) -> Elf64Addr {
+        self.st_value
+    }
+}
+
+impl ElfRelocation for Elf64Rel {
+    fn r_offset(&self) -> Elf64Addr {
+        self.r_offset
+    }
+
+    // The low 32 bits of r_info hold the relocation type and the high 32 bits
+    // the symbol index; e.g. r_info = 0x0000_0003_0000_0008 decodes to
+    // r_sym() == 3 and r_type() == 8 (R_X86_64_RELATIVE).
+    fn r_type(&self) -> Elf64Word {
+        (self.r_info & 0xFFFFFFFF) as Elf64Word
+    }
+
+    fn r_sym(&self) -> Elf64Word {
+        self.r_info.checked_shr(32).unwrap_or(0) as Elf64Word
+    }
+}
+
+impl From<ElfParserError> for ElfError {
+    fn from(err: ElfParserError) -> Self {
+        match err {
+            ElfParserError::InvalidSectionHeader
+            | ElfParserError::InvalidString
+            | ElfParserError::StringTooLong(_, _)
+            | ElfParserError::InvalidSize
+            | ElfParserError::Overlap
+            | ElfParserError::SectionNotInOrder
+            | ElfParserError::NoSectionNameStringTable
+            | ElfParserError::InvalidDynamicSectionTable
+            | ElfParserError::InvalidRelocationTable
+            | ElfParserError::InvalidAlignment
+            | ElfParserError::NoStringTable
+            | ElfParserError::NoDynamicStringTable
+            | ElfParserError::InvalidFileHeader => ElfError::FailedToParse(err.to_string()),
+            ElfParserError::InvalidProgramHeader => ElfError::InvalidProgramHeader,
+            ElfParserError::OutOfBounds => ElfError::ValueOutOfBounds,
+        }
+    }
+}
+
+impl From<GoblinError> for ElfError {
+    fn from(error: GoblinError) -> Self {
+        match error {
+            GoblinError::Malformed(string) => Self::FailedToParse(format!("malformed: {string}")),
+            GoblinError::BadMagic(magic) => Self::FailedToParse(format!("bad magic: {magic:#x}")),
+            GoblinError::Scroll(error) => Self::FailedToParse(format!("read-write: {error}")),
+            GoblinError::IO(error) => Self::FailedToParse(format!("io: {error}")),
+            GoblinError::BufferTooShort(n, error) => {
+                Self::FailedToParse(format!("buffer too short {n} {error}"))
+            }
+            _ => Self::FailedToParse("cause unknown".to_string()),
+        }
+    }
+}
+
+impl From<GoblinError> for EbpfError {
+    fn from(error: GoblinError) -> Self {
+        ElfError::from(error).into()
+    }
+}
diff --git a/rbpf/src/error.rs b/rbpf/src/error.rs
new file mode 100644
index 00000000000000..2e1fc445bafc05
--- /dev/null
+++ b/rbpf/src/error.rs
@@ -0,0 +1,195 @@
+// Copyright 2016 6WIND S.A.
+//
+// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
+// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! This module contains the error and result types used across the crate.
+
+use {
+    crate::{elf::ElfError, memory_region::AccessType, verifier::VerifierError},
+    std::error::Error,
+};
+
+/// Error definitions
+#[derive(Debug, thiserror::Error)]
+#[repr(u64)] // discriminant size, used in emit_exception_kind in JIT
+pub enum EbpfError {
+    /// ELF error
+    #[error("ELF error: {0}")]
+    ElfError(#[from] ElfError),
+    /// Function was already registered
+    #[error("function #{0} was already registered")]
+    FunctionAlreadyRegistered(usize),
+    /// Exceeded max BPF to BPF call depth
+    #[error("exceeded max BPF to BPF call depth")]
+    CallDepthExceeded,
+    /// Attempt to exit from root call frame
+    #[error("attempted to exit root call frame")]
+    ExitRootCallFrame,
+    /// Divide by zero
+    #[error("divide by zero at BPF instruction")]
+    DivideByZero,
+    /// Divide overflow
+    #[error("division overflow at BPF instruction")]
+    DivideOverflow,
+    /// Attempt to execute past the end of the text segment
+    #[error("attempted to execute past the end of the text segment at BPF instruction")]
+    ExecutionOverrun,
+    /// Attempt to call to an address outside the text segment
+    #[error("callx attempted to call outside of the text segment")]
+    CallOutsideTextSegment,
+    /// Exceeded max instructions allowed
+    #[error("exceeded CUs meter at BPF instruction")]
+    ExceededMaxInstructions,
+    /// Program has not been JIT-compiled
+    #[error("program has not been JIT-compiled")]
+    JitNotCompiled,
+    /// Invalid virtual address
+    #[error("invalid virtual address {0:x?}")]
+    InvalidVirtualAddress(u64),
+    /// Memory region index or virtual address space is invalid
+    #[error("Invalid memory region at index {0}")]
+    InvalidMemoryRegion(usize),
+    /// Access violation (general)
+    #[error("Access violation in {3} section at address {1:#x} of size {2:?}")]
+    AccessViolation(AccessType, u64, u64, &'static str),
+    /// Access violation (stack specific)
+    #[error("Access violation in stack frame {3} at address {1:#x} of size {2:?}")]
+    StackAccessViolation(AccessType, u64, u64, i64),
+    /// Invalid instruction
+    #[error("invalid BPF instruction")]
+    InvalidInstruction,
+    /// Unsupported instruction
+    #[error("unsupported BPF instruction")]
+    UnsupportedInstruction,
+    /// Compilation is too big to fit
+    #[error("Compilation exhausted text segment at BPF instruction {0}")]
+    ExhaustedTextSegment(usize),
+    /// Libc function call returned an error
+    #[error("Libc calling {0} {1:?} returned error code {2}")]
+    LibcInvocationFailed(&'static str, Vec<String>, i32),
+    /// Verifier error
+    #[error("Verifier error: {0}")]
+    VerifierError(#[from] VerifierError),
+    /// Syscall error
+    #[error("Syscall error: {0}")]
+    SyscallError(Box<dyn Error>),
+}
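+
+// For illustration only: a host-side failure can be surfaced through the boxed
+// `SyscallError` variant (the io::Error here is an arbitrary example):
+//
+//     let err = EbpfError::SyscallError(Box::new(std::io::Error::new(
+//         std::io::ErrorKind::Other,
+//         "host syscall failed",
+//     )));
+//     assert_eq!(err.to_string(), "Syscall error: host syscall failed");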
{0}")] + SyscallError(Box), +} + +/// Same as `Result` but provides a stable memory layout +#[derive(Debug)] +#[repr(C, u64)] +pub enum StableResult { + /// Success + Ok(T), + /// Failure + Err(E), +} + +impl StableResult { + /// `true` if `Ok` + pub fn is_ok(&self) -> bool { + match self { + Self::Ok(_) => true, + Self::Err(_) => false, + } + } + + /// `true` if `Err` + pub fn is_err(&self) -> bool { + match self { + Self::Ok(_) => false, + Self::Err(_) => true, + } + } + + /// Returns the inner value if `Ok`, panics otherwise + pub fn unwrap(self) -> T { + match self { + Self::Ok(value) => value, + Self::Err(error) => panic!("unwrap {:?}", error), + } + } + + /// Returns the inner error if `Err`, panics otherwise + pub fn unwrap_err(self) -> E { + match self { + Self::Ok(value) => panic!("unwrap_err {:?}", value), + Self::Err(error) => error, + } + } + + /// Maps ok values, leaving error values untouched + pub fn map U>(self, op: O) -> StableResult { + match self { + Self::Ok(value) => StableResult::::Ok(op(value)), + Self::Err(error) => StableResult::::Err(error), + } + } + + /// Maps error values, leaving ok values untouched + pub fn map_err F>(self, op: O) -> StableResult { + match self { + Self::Ok(value) => StableResult::::Ok(value), + Self::Err(error) => StableResult::::Err(op(error)), + } + } + + #[cfg_attr( + any( + not(feature = "jit"), + target_os = "windows", + not(target_arch = "x86_64") + ), + allow(dead_code) + )] + pub(crate) fn discriminant(&self) -> u64 { + unsafe { *(self as *const _ as *const u64) } + } +} + +impl From> for Result { + fn from(result: StableResult) -> Self { + match result { + StableResult::Ok(value) => Ok(value), + StableResult::Err(value) => Err(value), + } + } +} + +impl From> for StableResult { + fn from(result: Result) -> Self { + match result { + Ok(value) => Self::Ok(value), + Err(value) => Self::Err(value), + } + } +} + +/// Return value of programs and syscalls +pub type ProgramResult = StableResult; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_program_result_is_stable() { + let ok = ProgramResult::Ok(42); + assert_eq!(ok.discriminant(), 0); + let err = ProgramResult::Err(EbpfError::JitNotCompiled); + assert_eq!(err.discriminant(), 1); + } +} diff --git a/rbpf/src/fuzz.rs b/rbpf/src/fuzz.rs new file mode 100644 index 00000000000000..f44f5403d5595d --- /dev/null +++ b/rbpf/src/fuzz.rs @@ -0,0 +1,27 @@ +//! This module defines memory regions + +use rand::Rng; +use std::ops::Range; + +/// fuzzing utility function +pub fn fuzz( + bytes: &[u8], + outer_iters: usize, + inner_iters: usize, + offset: Range, + value: Range, + work: F, +) where + F: Fn(&mut [u8]), +{ + let mut rng = rand::thread_rng(); + for _ in 0..outer_iters { + let mut mangled_bytes = bytes.to_vec(); + for _ in 0..inner_iters { + let offset = rng.gen_range(offset.start..offset.end); + let value = rng.gen_range(value.start..value.end); + mangled_bytes[offset] = value; + work(&mut mangled_bytes); + } + } +} diff --git a/rbpf/src/insn_builder.rs b/rbpf/src/insn_builder.rs new file mode 100644 index 00000000000000..179276f93472b8 --- /dev/null +++ b/rbpf/src/insn_builder.rs @@ -0,0 +1,2182 @@ +#![allow(clippy::arithmetic_side_effects)] +// Copyright 2017 Alex Dukhno +// +// Licensed under the Apache License, Version 2.0 or +// the MIT license , at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +//! 
diff --git a/rbpf/src/insn_builder.rs b/rbpf/src/insn_builder.rs
new file mode 100644
index 00000000000000..179276f93472b8
--- /dev/null
+++ b/rbpf/src/insn_builder.rs
@@ -0,0 +1,2182 @@
+#![allow(clippy::arithmetic_side_effects)]
+// Copyright 2017 Alex Dukhno
+//
+// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
+// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! Module providing an API to create eBPF programs in the Rust programming language
+
+use crate::ebpf::*;
+
+/// Represents a single eBPF instruction
+pub trait Instruction: Sized {
+    /// returns the instruction opt code
+    fn opt_code_byte(&self) -> u8;
+
+    /// returns the destination register
+    fn get_dst(&self) -> u8 {
+        self.get_insn().dst
+    }
+
+    /// returns the source register
+    fn get_src(&self) -> u8 {
+        self.get_insn().src
+    }
+
+    /// returns the offset bytes
+    fn get_off(&self) -> i16 {
+        self.get_insn().off
+    }
+
+    /// returns the immediate value
+    fn get_imm(&self) -> i64 {
+        self.get_insn().imm
+    }
+
+    /// sets the destination register
+    #[must_use]
+    fn set_dst(mut self, dst: u8) -> Self {
+        self.get_insn_mut().dst = dst;
+        self
+    }
+
+    /// sets the source register
+    #[must_use]
+    fn set_src(mut self, src: u8) -> Self {
+        self.get_insn_mut().src = src;
+        self
+    }
+
+    /// sets the offset bytes
+    #[must_use]
+    fn set_off(mut self, offset: i16) -> Self {
+        self.get_insn_mut().off = offset;
+        self
+    }
+
+    /// sets the immediate value
+    #[must_use]
+    fn set_imm(mut self, imm: i64) -> Self {
+        self.get_insn_mut().imm = imm;
+        self
+    }
+
+    /// get the `ebpf::Insn` struct
+    fn get_insn(&self) -> &Insn;
+
+    /// get the mutable `ebpf::Insn` struct
+    fn get_insn_mut(&mut self) -> &mut Insn;
+}
+
+/// General trait for `Instruction`s and `BpfCode`.
+/// Provides functionality to transform a `struct` into a collection of bytes
+pub trait IntoBytes {
+    /// type of the targeted transformation
+    type Bytes;
+
+    /// consume `Self` with transformation into `Self::Bytes`
+    fn into_bytes(self) -> Self::Bytes;
+}
+
+/// General implementation of `IntoBytes` for `Instruction`
+impl<'i, I: Instruction> IntoBytes for &'i I {
+    type Bytes = Vec<u8>;
+
+    /// transforms an immutable reference of `Instruction` into a `Vec<u8>` of size 8:
+    /// [ 1 byte ,      1 byte      , 2 bytes, 4 bytes   ]
+    /// [ OP_CODE, SRC_REG | DST_REG, OFFSET , IMMEDIATE ]
+    fn into_bytes(self) -> Self::Bytes {
+        vec![
+            self.opt_code_byte(),
+            self.get_src() << 4 | self.get_dst(),
+            self.get_off() as u8,
+            (self.get_off() >> 8) as u8,
+            self.get_imm() as u8,
+            (self.get_imm() >> 8) as u8,
+            (self.get_imm() >> 16) as u8,
+            (self.get_imm() >> 24) as u8,
+        ]
+    }
+}
+
+/// BPF instruction stack in byte representation
+#[derive(Default)]
+pub struct BpfCode {
+    instructions: Vec<u8>,
+}
+
+impl BpfCode {
+    /// creates a new empty BPF instruction stack
+    pub fn new() -> Self {
+        BpfCode {
+            instructions: vec![],
+        }
+    }
+
+    /// create ADD instruction
+    pub fn add(&mut self, source: Source, arch: Arch) -> Move {
+        self.mov_internal(source, arch, OpBits::Add)
+    }
+
+    /// create SUB instruction
+    pub fn sub(&mut self, source: Source, arch: Arch) -> Move {
+        self.mov_internal(source, arch, OpBits::Sub)
+    }
+
+    /// create MUL instruction
+    pub fn mul(&mut self, source: Source, arch: Arch) -> Move {
+        self.mov_internal(source, arch, OpBits::Mul)
+    }
+
+    /// create DIV instruction
+    pub fn div(&mut self, source: Source, arch: Arch) -> Move {
+        self.mov_internal(source, arch, OpBits::Div)
+    }
+
+    /// create OR instruction
+    pub fn bit_or(&mut self, source: Source, arch: Arch) -> Move {
+        self.mov_internal(source, arch, OpBits::BitOr)
+    }
+
+    /// create AND instruction
+    pub fn bit_and(&mut self, source: Source, arch: Arch) -> Move {
+        self.mov_internal(source, arch, OpBits::BitAnd)
+    }
+
+    /// create LSHIFT instruction
+    pub fn left_shift(&mut self, source: Source, arch: Arch) -> Move {
+        self.mov_internal(source, arch, OpBits::LShift)
+    }
+
+    /// create RSHIFT instruction
+    pub fn right_shift(&mut self, source: Source, arch:
Arch) -> Move { + self.mov_internal(source, arch, OpBits::RShift) + } + + /// create NEGATE instruction + pub fn negate(&mut self, arch: Arch) -> Move { + self.mov_internal(Source::Imm, arch, OpBits::Negate) + } + + /// create MOD instruction + pub fn modulo(&mut self, source: Source, arch: Arch) -> Move { + self.mov_internal(source, arch, OpBits::Mod) + } + + /// create XOR instruction + pub fn bit_xor(&mut self, source: Source, arch: Arch) -> Move { + self.mov_internal(source, arch, OpBits::BitXor) + } + + /// create MOV instruction + pub fn mov(&mut self, source: Source, arch: Arch) -> Move { + self.mov_internal(source, arch, OpBits::Mov) + } + + /// create SIGNED RSHIFT instruction + pub fn signed_right_shift(&mut self, source: Source, arch: Arch) -> Move { + self.mov_internal(source, arch, OpBits::SignRShift) + } + + #[inline] + fn mov_internal(&mut self, source: Source, arch_bits: Arch, op_bits: OpBits) -> Move { + Move { + bpf_code: self, + src_bit: source, + op_bits, + arch_bits, + insn: Insn::default(), + } + } + + /// create byte swap instruction + pub fn swap_bytes(&mut self, endian: Endian) -> SwapBytes { + SwapBytes { + bpf_code: self, + endian, + insn: Insn::default(), + } + } + + /// create LOAD instruction, IMMEDIATE is the source + pub fn load(&mut self, mem_size: MemSize) -> Load { + self.load_internal(mem_size, Addressing::Imm, BPF_LD) + } + + /// create ABSOLUTE LOAD instruction + pub fn load_abs(&mut self, mem_size: MemSize) -> Load { + self.load_internal(mem_size, Addressing::Abs, BPF_LD) + } + + /// create INDIRECT LOAD instruction + pub fn load_ind(&mut self, mem_size: MemSize) -> Load { + self.load_internal(mem_size, Addressing::Ind, BPF_LD) + } + + /// create LOAD instruction, MEMORY is the source + pub fn load_x(&mut self, mem_size: MemSize) -> Load { + self.load_internal(mem_size, Addressing::Mem, BPF_LDX) + } + + #[inline] + fn load_internal(&mut self, mem_size: MemSize, addressing: Addressing, source: u8) -> Load { + Load { + bpf_code: self, + addressing, + mem_size, + source, + insn: Insn::default(), + } + } + + /// creates STORE instruction, IMMEDIATE is the source + pub fn store(&mut self, mem_size: MemSize) -> Store { + self.store_internal(mem_size, BPF_IMM) + } + + /// creates STORE instruction, MEMORY is the source + pub fn store_x(&mut self, mem_size: MemSize) -> Store { + self.store_internal(mem_size, BPF_MEM | BPF_STX) + } + + #[inline] + fn store_internal(&mut self, mem_size: MemSize, source: u8) -> Store { + Store { + bpf_code: self, + mem_size, + source, + insn: Insn::default(), + } + } + + /// create unconditional JMP instruction + pub fn jump_unconditional(&mut self) -> Jump { + self.jump_conditional(Cond::Abs, Source::Imm) + } + + /// create conditional JMP instruction + pub fn jump_conditional(&mut self, cond: Cond, src_bit: Source) -> Jump { + Jump { + bpf_code: self, + cond, + src_bit, + insn: Insn::default(), + } + } + + /// create CALL instruction + pub fn call(&mut self) -> FunctionCall { + FunctionCall { + bpf_code: self, + insn: Insn::default(), + } + } + + /// create EXIT instruction + pub fn exit(&mut self) -> Exit { + Exit { + bpf_code: self, + insn: Insn::default(), + } + } +} + +/// Transform `BpfCode` into assemble representation +impl<'a> IntoBytes for &'a BpfCode { + type Bytes = &'a [u8]; + + /// returns `BpfCode` instruction stack as `&[u8]` + fn into_bytes(self) -> Self::Bytes { + self.instructions.as_slice() + } +} + +/// struct to represent `MOV ALU` instructions +pub struct Move<'i> { + bpf_code: &'i mut BpfCode, + src_bit: 
Source, + op_bits: OpBits, + arch_bits: Arch, + insn: Insn, +} + +impl<'i> Move<'i> { + /// push MOV instruction into BpfCode instruction stack + pub fn push(self) -> &'i mut BpfCode { + let mut asm = self.into_bytes(); + self.bpf_code.instructions.append(&mut asm); + self.bpf_code + } +} + +impl<'i> Instruction for Move<'i> { + fn opt_code_byte(&self) -> u8 { + let op_bits = self.op_bits as u8; + let src_bit = self.src_bit as u8; + let arch_bits = self.arch_bits as u8; + op_bits | src_bit | arch_bits + } + + fn get_insn_mut(&mut self) -> &mut Insn { + &mut self.insn + } + + fn get_insn(&self) -> &Insn { + &self.insn + } +} + +#[derive(Copy, Clone, PartialEq, Eq)] +#[cfg_attr( + feature = "fuzzer-not-safe-for-production", + derive(arbitrary::Arbitrary, Debug) +)] +/// The source of ALU and JMP instructions +pub enum Source { + /// immediate field will be used as a source + Imm = BPF_IMM as isize, + /// src register will be used as a source + Reg = BPF_X as isize, +} + +#[derive(Copy, Clone)] +enum OpBits { + Add = BPF_ADD as isize, + Sub = BPF_SUB as isize, + Mul = BPF_MUL as isize, + Div = BPF_DIV as isize, + BitOr = BPF_OR as isize, + BitAnd = BPF_AND as isize, + LShift = BPF_LSH as isize, + RShift = BPF_RSH as isize, + Negate = BPF_NEG as isize, + Mod = BPF_MOD as isize, + BitXor = BPF_XOR as isize, + Mov = BPF_MOV as isize, + SignRShift = BPF_ARSH as isize, +} + +#[derive(Copy, Clone)] +#[cfg_attr( + feature = "fuzzer-not-safe-for-production", + derive(arbitrary::Arbitrary, Debug, PartialEq, Eq) +)] +/// Architecture of instructions +pub enum Arch { + /// 64-bit instructions + X64 = BPF_ALU64 as isize, + /// 32-bit instructions + X32 = BPF_ALU as isize, +} + +/// struct representation of byte swap operation +pub struct SwapBytes<'i> { + bpf_code: &'i mut BpfCode, + endian: Endian, + insn: Insn, +} + +impl<'i> SwapBytes<'i> { + /// push bytes swap instruction into BpfCode instruction stack + pub fn push(self) -> &'i mut BpfCode { + let mut asm = self.into_bytes(); + self.bpf_code.instructions.append(&mut asm); + self.bpf_code + } +} + +impl<'i> Instruction for SwapBytes<'i> { + fn opt_code_byte(&self) -> u8 { + self.endian as u8 + } + + fn get_insn_mut(&mut self) -> &mut Insn { + &mut self.insn + } + + fn get_insn(&self) -> &Insn { + &self.insn + } +} + +#[derive(Copy, Clone)] +#[cfg_attr( + feature = "fuzzer-not-safe-for-production", + derive(arbitrary::Arbitrary, Debug, PartialEq, Eq) +)] +/// Bytes endian +pub enum Endian { + /// Little endian + Little = LE as isize, + /// Big endian + Big = BE as isize, +} + +/// struct representation of LOAD instructions +pub struct Load<'i> { + bpf_code: &'i mut BpfCode, + addressing: Addressing, + mem_size: MemSize, + source: u8, + insn: Insn, +} + +impl<'i> Load<'i> { + /// push LOAD instruction into BpfCode instruction stack + pub fn push(self) -> &'i mut BpfCode { + let mut asm = self.into_bytes(); + self.bpf_code.instructions.append(&mut asm); + self.bpf_code + } +} + +impl<'i> Instruction for Load<'i> { + fn opt_code_byte(&self) -> u8 { + let size = self.mem_size as u8; + let addressing = self.addressing as u8; + addressing | size | self.source + } + + fn get_insn_mut(&mut self) -> &mut Insn { + &mut self.insn + } + + fn get_insn(&self) -> &Insn { + &self.insn + } +} + +/// struct representation of STORE instructions +pub struct Store<'i> { + bpf_code: &'i mut BpfCode, + mem_size: MemSize, + source: u8, + insn: Insn, +} + +impl<'i> Store<'i> { + /// push STORE instruction into BpfCode instruction stack + pub fn push(self) -> &'i mut BpfCode 
{ + let mut asm = self.into_bytes(); + self.bpf_code.instructions.append(&mut asm); + self.bpf_code + } +} + +impl<'i> Instruction for Store<'i> { + fn opt_code_byte(&self) -> u8 { + let size = self.mem_size as u8; + BPF_MEM | BPF_ST | size | self.source + } + + fn get_insn_mut(&mut self) -> &mut Insn { + &mut self.insn + } + + fn get_insn(&self) -> &Insn { + &self.insn + } +} + +#[derive(Copy, Clone)] +#[cfg_attr( + feature = "fuzzer-not-safe-for-production", + derive(arbitrary::Arbitrary, Debug, PartialEq, Eq) +)] +/// Memory size for LOAD and STORE instructions +pub enum MemSize { + /// 8-bit size + Byte = BPF_B as isize, + /// 16-bit size + HalfWord = BPF_H as isize, + /// 32-bit size + Word = BPF_W as isize, + /// 64-bit size + DoubleWord = BPF_DW as isize, +} + +#[derive(Copy, Clone)] +enum Addressing { + Imm = BPF_IMM as isize, + Abs = BPF_ABS as isize, + Ind = BPF_IND as isize, + Mem = BPF_MEM as isize, +} + +/// struct representation of JMP instructions +pub struct Jump<'i> { + bpf_code: &'i mut BpfCode, + cond: Cond, + src_bit: Source, + insn: Insn, +} + +impl<'i> Jump<'i> { + /// push JMP instruction into BpfCode instruction stack + pub fn push(self) -> &'i mut BpfCode { + let mut asm = self.into_bytes(); + self.bpf_code.instructions.append(&mut asm); + self.bpf_code + } +} + +impl<'i> Instruction for Jump<'i> { + fn opt_code_byte(&self) -> u8 { + let cmp: u8 = self.cond as u8; + let src_bit = self.src_bit as u8; + cmp | src_bit | BPF_JMP + } + + fn get_insn_mut(&mut self) -> &mut Insn { + &mut self.insn + } + + fn get_insn(&self) -> &Insn { + &self.insn + } +} + +#[derive(Copy, Clone, PartialEq, Eq)] +#[cfg_attr( + feature = "fuzzer-not-safe-for-production", + derive(arbitrary::Arbitrary, Debug) +)] +/// Conditions for JMP instructions +pub enum Cond { + /// Absolute or unconditional + Abs = BPF_JA as isize, + /// Jump if `==` + Equals = BPF_JEQ as isize, + /// Jump if `>` + Greater = BPF_JGT as isize, + /// Jump if `>=` + GreaterEquals = BPF_JGE as isize, + /// Jump if `<` + Lower = BPF_JLT as isize, + /// Jump if `<=` + LowerEquals = BPF_JLE as isize, + /// Jump if `src` & `dst` + BitAnd = BPF_JSET as isize, + /// Jump if `!=` + NotEquals = BPF_JNE as isize, + /// Jump if `>` (signed) + GreaterSigned = BPF_JSGT as isize, + /// Jump if `>=` (signed) + GreaterEqualsSigned = BPF_JSGE as isize, + /// Jump if `<` (signed) + LowerSigned = BPF_JSLT as isize, + /// Jump if `<=` (signed) + LowerEqualsSigned = BPF_JSLE as isize, +} + +/// struct representation of CALL instruction +pub struct FunctionCall<'i> { + bpf_code: &'i mut BpfCode, + insn: Insn, +} + +impl<'i> FunctionCall<'i> { + /// push CALL instruction into BpfCode instruction stack + pub fn push(self) -> &'i mut BpfCode { + let mut asm = self.into_bytes(); + self.bpf_code.instructions.append(&mut asm); + self.bpf_code + } +} + +impl<'i> Instruction for FunctionCall<'i> { + fn opt_code_byte(&self) -> u8 { + BPF_CALL | BPF_JMP + } + + fn get_insn_mut(&mut self) -> &mut Insn { + &mut self.insn + } + + fn get_insn(&self) -> &Insn { + &self.insn + } +} + +/// struct representation of EXIT instruction +pub struct Exit<'i> { + bpf_code: &'i mut BpfCode, + insn: Insn, +} + +impl<'i> Exit<'i> { + /// push EXIT instruction into BpfCode instruction stack + pub fn push(self) -> &'i mut BpfCode { + let mut asm = self.into_bytes(); + self.bpf_code.instructions.append(&mut asm); + self.bpf_code + } +} + +impl<'i> Instruction for Exit<'i> { + fn opt_code_byte(&self) -> u8 { + BPF_EXIT | BPF_JMP + } + + fn get_insn_mut(&mut self) -> &mut 
Insn { + &mut self.insn + } + + fn get_insn(&self) -> &Insn { + &self.insn + } +} + +#[cfg(test)] +mod tests { + #[cfg(test)] + mod special { + use super::super::*; + + #[test] + fn call_immediate() { + let mut program = BpfCode::new(); + program.call().set_imm(0x11_22_33_44).push(); + + assert_eq!( + program.into_bytes(), + &[0x85, 0x00, 0x00, 0x00, 0x44, 0x33, 0x22, 0x11] + ); + } + + #[test] + fn exit_operation() { + let mut program = BpfCode::new(); + program.exit().push(); + + assert_eq!( + program.into_bytes(), + &[0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + } + + #[cfg(test)] + mod jump_instructions { + #[cfg(test)] + mod register { + use super::super::super::*; + + #[test] + fn jump_on_dst_equals_src() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::Equals, Source::Reg) + .set_dst(0x01) + .set_src(0x02) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x1d, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_greater_than_src() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::Greater, Source::Reg) + .set_dst(0x03) + .set_src(0x02) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x2d, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_greater_or_equals_to_src() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::GreaterEquals, Source::Reg) + .set_dst(0x04) + .set_src(0x01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x3d, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_lower_than_src() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::Lower, Source::Reg) + .set_dst(0x03) + .set_src(0x02) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xad, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_lower_or_equals_to_src() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::LowerEquals, Source::Reg) + .set_dst(0x04) + .set_src(0x01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xbd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_bit_and_with_src_not_equal_zero() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::BitAnd, Source::Reg) + .set_dst(0x05) + .set_src(0x02) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x4d, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_not_equals_src() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::NotEquals, Source::Reg) + .set_dst(0x03) + .set_src(0x05) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x5d, 0x53, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_greater_than_src_signed() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::GreaterSigned, Source::Reg) + .set_dst(0x04) + .set_src(0x01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x6d, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_greater_or_equals_src_signed() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::GreaterEqualsSigned, Source::Reg) + .set_dst(0x01) + .set_src(0x03) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x7d, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_lower_than_src_signed() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::LowerSigned, Source::Reg) + .set_dst(0x04) + .set_src(0x01) 
+ .push(); + + assert_eq!( + program.into_bytes(), + &[0xcd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_lower_or_equals_src_signed() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::LowerEqualsSigned, Source::Reg) + .set_dst(0x01) + .set_src(0x03) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xdd, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + } + + #[cfg(test)] + mod immediate { + use super::super::super::*; + + #[test] + fn jump_to_label() { + let mut program = BpfCode::new(); + program.jump_unconditional().set_off(0x00_11).push(); + + assert_eq!( + program.into_bytes(), + &[0x05, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_equals_const() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::Equals, Source::Imm) + .set_dst(0x01) + .set_imm(0x00_11_22_33) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x15, 0x01, 0x00, 0x00, 0x33, 0x22, 0x11, 0x00] + ); + } + + #[test] + fn jump_on_dst_greater_than_const() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::Greater, Source::Imm) + .set_dst(0x02) + .set_imm(0x00_11_00_11) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x25, 0x02, 0x00, 0x00, 0x11, 0x00, 0x11, 0x00] + ); + } + + #[test] + fn jump_on_dst_greater_or_equals_to_const() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::GreaterEquals, Source::Imm) + .set_dst(0x04) + .set_imm(0x00_22_11_00) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x35, 0x04, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00] + ); + } + + #[test] + fn jump_on_dst_lower_than_const() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::Lower, Source::Imm) + .set_dst(0x02) + .set_imm(0x00_11_00_11) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xa5, 0x02, 0x00, 0x00, 0x11, 0x00, 0x11, 0x00] + ); + } + + #[test] + fn jump_on_dst_lower_or_equals_to_const() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::LowerEquals, Source::Imm) + .set_dst(0x04) + .set_imm(0x00_22_11_00) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xb5, 0x04, 0x00, 0x00, 0x00, 0x11, 0x22, 0x00] + ); + } + + #[test] + fn jump_on_dst_bit_and_with_const_not_equal_zero() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::BitAnd, Source::Imm) + .set_dst(0x05) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x45, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_not_equals_const() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::NotEquals, Source::Imm) + .set_dst(0x03) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x55, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_greater_than_const_signed() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::GreaterSigned, Source::Imm) + .set_dst(0x04) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x65, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_greater_or_equals_src_signed() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::GreaterEqualsSigned, Source::Imm) + .set_dst(0x01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x75, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_lower_than_const_signed() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::LowerSigned, Source::Imm) + 
.set_dst(0x04) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xc5, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn jump_on_dst_lower_or_equals_src_signed() { + let mut program = BpfCode::new(); + program + .jump_conditional(Cond::LowerEqualsSigned, Source::Imm) + .set_dst(0x01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xd5, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + } + } + + #[cfg(test)] + mod store_instructions { + use super::super::*; + + #[test] + fn store_word_from_dst_into_immediate_address() { + let mut program = BpfCode::new(); + program + .store(MemSize::Word) + .set_dst(0x01) + .set_off(0x00_11) + .set_imm(0x11_22_33_44) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x62, 0x01, 0x11, 0x00, 0x44, 0x33, 0x22, 0x11] + ); + } + + #[test] + fn store_half_word_from_dst_into_immediate_address() { + let mut program = BpfCode::new(); + program + .store(MemSize::HalfWord) + .set_dst(0x02) + .set_off(0x11_22) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x6a, 0x02, 0x22, 0x11, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn store_byte_from_dst_into_immediate_address() { + let mut program = BpfCode::new(); + program.store(MemSize::Byte).push(); + + assert_eq!( + program.into_bytes(), + &[0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn store_double_word_from_dst_into_immediate_address() { + let mut program = BpfCode::new(); + program.store(MemSize::DoubleWord).push(); + + assert_eq!( + program.into_bytes(), + &[0x7a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn store_word_from_dst_into_src_address() { + let mut program = BpfCode::new(); + program + .store_x(MemSize::Word) + .set_dst(0x01) + .set_src(0x02) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x63, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn store_half_word_from_dst_into_src_address() { + let mut program = BpfCode::new(); + program.store_x(MemSize::HalfWord).push(); + + assert_eq!( + program.into_bytes(), + &[0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn store_byte_from_dst_into_src_address() { + let mut program = BpfCode::new(); + program.store_x(MemSize::Byte).push(); + + assert_eq!( + program.into_bytes(), + &[0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn store_double_word_from_dst_into_src_address() { + let mut program = BpfCode::new(); + program.store_x(MemSize::DoubleWord).push(); + + assert_eq!( + program.into_bytes(), + &[0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + } + + #[cfg(test)] + mod load_instructions { + #[cfg(test)] + mod register { + use super::super::super::*; + + #[test] + fn load_word_from_set_src_with_offset() { + let mut program = BpfCode::new(); + program + .load_x(MemSize::Word) + .set_dst(0x01) + .set_src(0x02) + .set_off(0x00_02) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x61, 0x21, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn load_half_word_from_set_src_with_offset() { + let mut program = BpfCode::new(); + program + .load_x(MemSize::HalfWord) + .set_dst(0x02) + .set_src(0x01) + .set_off(0x11_22) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x69, 0x12, 0x22, 0x11, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn load_byte_from_set_src_with_offset() { + let mut program = BpfCode::new(); + program + .load_x(MemSize::Byte) + .set_dst(0x01) + .set_src(0x04) + .set_off(0x00_11) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x71, 0x41, 0x11, 
0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn load_double_word_from_set_src_with_offset() { + let mut program = BpfCode::new(); + program + .load_x(MemSize::DoubleWord) + .set_dst(0x04) + .set_src(0x05) + .set_off(0x44_55) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x79, 0x54, 0x55, 0x44, 0x00, 0x00, 0x00, 0x00] + ); + } + } + + #[cfg(test)] + mod immediate { + use super::super::super::*; + + #[test] + fn load_double_word() { + let mut program = BpfCode::new(); + program + .load(MemSize::DoubleWord) + .set_dst(0x01) + .set_imm(0x00_01_02_03) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x18, 0x01, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00] + ); + } + + #[test] + fn load_abs_word() { + let mut program = BpfCode::new(); + program.load_abs(MemSize::Word).push(); + + assert_eq!( + program.into_bytes(), + &[0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn load_abs_half_word() { + let mut program = BpfCode::new(); + program.load_abs(MemSize::HalfWord).set_dst(0x05).push(); + + assert_eq!( + program.into_bytes(), + &[0x28, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn load_abs_byte() { + let mut program = BpfCode::new(); + program.load_abs(MemSize::Byte).set_dst(0x01).push(); + + assert_eq!( + program.into_bytes(), + &[0x30, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn load_abs_double_word() { + let mut program = BpfCode::new(); + program + .load_abs(MemSize::DoubleWord) + .set_dst(0x01) + .set_imm(0x01_02_03_04) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x38, 0x01, 0x00, 0x00, 0x04, 0x03, 0x02, 0x01] + ); + } + + #[test] + fn load_indirect_word() { + let mut program = BpfCode::new(); + program.load_ind(MemSize::Word).push(); + + assert_eq!( + program.into_bytes(), + &[0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn load_indirect_half_word() { + let mut program = BpfCode::new(); + program.load_ind(MemSize::HalfWord).push(); + + assert_eq!( + program.into_bytes(), + &[0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn load_indirect_byte() { + let mut program = BpfCode::new(); + program.load_ind(MemSize::Byte).push(); + + assert_eq!( + program.into_bytes(), + &[0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn load_indirect_double_word() { + let mut program = BpfCode::new(); + program.load_ind(MemSize::DoubleWord).push(); + + assert_eq!( + program.into_bytes(), + &[0x58, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + } + } + + #[cfg(test)] + mod byte_swap_instructions { + use super::super::*; + + #[test] + fn convert_host_to_little_endian_16bits() { + let mut program = BpfCode::new(); + program + .swap_bytes(Endian::Little) + .set_dst(0x01) + .set_imm(0x00_00_00_10) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xd4, 0x01, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn convert_host_to_little_endian_32bits() { + let mut program = BpfCode::new(); + program + .swap_bytes(Endian::Little) + .set_dst(0x02) + .set_imm(0x00_00_00_20) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xd4, 0x02, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn convert_host_to_little_endian_64bit() { + let mut program = BpfCode::new(); + program + .swap_bytes(Endian::Little) + .set_dst(0x03) + .set_imm(0x00_00_00_40) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xd4, 0x03, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn convert_host_to_big_endian_16bits() { + let mut program = 
BpfCode::new(); + program + .swap_bytes(Endian::Big) + .set_dst(0x01) + .set_imm(0x00_00_00_10) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xdc, 0x01, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn convert_host_to_big_endian_32bits() { + let mut program = BpfCode::new(); + program + .swap_bytes(Endian::Big) + .set_dst(0x02) + .set_imm(0x00_00_00_20) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xdc, 0x02, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn convert_host_to_big_endian_64bit() { + let mut program = BpfCode::new(); + program + .swap_bytes(Endian::Big) + .set_dst(0x03) + .set_imm(0x00_00_00_40) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xdc, 0x03, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00] + ); + } + } + + #[cfg(test)] + mod moves_instructions { + #[cfg(test)] + mod arch_x64 { + #[cfg(test)] + mod immediate { + use super::super::super::super::*; + + #[test] + fn move_and_add_const_to_register() { + let mut program = BpfCode::new(); + program + .add(Source::Imm, Arch::X64) + .set_dst(0x02) + .set_imm(0x01_02_03_04) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x07, 0x02, 0x00, 0x00, 0x04, 0x03, 0x02, 0x01] + ); + } + + #[test] + fn move_sub_const_to_register() { + let mut program = BpfCode::new(); + program + .sub(Source::Imm, Arch::X64) + .set_dst(0x04) + .set_imm(0x00_01_02_03) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x17, 0x04, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00] + ); + } + + #[test] + fn move_mul_const_to_register() { + let mut program = BpfCode::new(); + program + .mul(Source::Imm, Arch::X64) + .set_dst(0x05) + .set_imm(0x04_03_02_01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x27, 0x05, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04] + ); + } + + #[test] + fn move_div_constant_to_register() { + let mut program = BpfCode::new(); + program + .div(Source::Imm, Arch::X64) + .set_dst(0x02) + .set_imm(0x00_ff_00_ff) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x37, 0x02, 0x00, 0x00, 0xff, 0x00, 0xff, 0x00] + ); + } + + #[test] + fn move_bit_or_const_to_register() { + let mut program = BpfCode::new(); + program + .bit_or(Source::Imm, Arch::X64) + .set_dst(0x02) + .set_imm(0x00_11_00_22) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x47, 0x02, 0x00, 0x00, 0x22, 0x00, 0x11, 0x00] + ); + } + + #[test] + fn move_bit_and_const_to_register() { + let mut program = BpfCode::new(); + program + .bit_and(Source::Imm, Arch::X64) + .set_dst(0x02) + .set_imm(0x11_22_33_44) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x57, 0x02, 0x00, 0x00, 0x44, 0x33, 0x22, 0x11] + ); + } + + #[test] + fn move_left_shift_const_to_register() { + let mut program = BpfCode::new(); + program + .left_shift(Source::Imm, Arch::X64) + .set_dst(0x01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x67, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_logical_right_shift_const_to_register() { + let mut program = BpfCode::new(); + program + .right_shift(Source::Imm, Arch::X64) + .set_dst(0x01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x77, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_negate_register() { + let mut program = BpfCode::new(); + program.negate(Arch::X64).set_dst(0x02).push(); + + assert_eq!( + program.into_bytes(), + &[0x87, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_mod_const_to_register() { + let mut program = BpfCode::new(); + program.modulo(Source::Imm, Arch::X64).set_dst(0x02).push(); + + 
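+ // Every eBPF instruction serializes to 8 bytes: opcode, regs byte ((src << 4) | dst), + // a little-endian u16 offset, and a little-endian u32 immediate. + // 0x97 below is BPF_ALU64 | BPF_K | BPF_MOD.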
assert_eq!( + program.into_bytes(), + &[0x97, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_bit_xor_const_to_register() { + let mut program = BpfCode::new(); + program.bit_xor(Source::Imm, Arch::X64).set_dst(0x03).push(); + + assert_eq!( + program.into_bytes(), + &[0xa7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_const_to_register() { + let mut program = BpfCode::new(); + program + .mov(Source::Imm, Arch::X64) + .set_dst(0x01) + .set_imm(0x00_00_00_FF) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xb7, 0x01, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_signed_right_shift_const_to_register() { + let mut program = BpfCode::new(); + program + .signed_right_shift(Source::Imm, Arch::X64) + .set_dst(0x05) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xc7, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + } + + #[cfg(test)] + mod register { + use super::super::super::super::*; + + #[test] + fn move_and_add_from_register() { + let mut program = BpfCode::new(); + program + .add(Source::Reg, Arch::X64) + .set_dst(0x03) + .set_src(0x02) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x0f, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_sub_from_register_to_register() { + let mut program = BpfCode::new(); + program + .sub(Source::Reg, Arch::X64) + .set_dst(0x03) + .set_src(0x04) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x1f, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_mul_from_register_to_register() { + let mut program = BpfCode::new(); + program + .mul(Source::Reg, Arch::X64) + .set_dst(0x04) + .set_src(0x03) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x2f, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_div_from_register_to_register() { + let mut program = BpfCode::new(); + program + .div(Source::Reg, Arch::X64) + .set_dst(0x01) + .set_src(0x00) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x3f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_bit_or_from_register_to_register() { + let mut program = BpfCode::new(); + program + .bit_or(Source::Reg, Arch::X64) + .set_dst(0x03) + .set_src(0x01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x4f, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_bit_and_from_register_to_register() { + let mut program = BpfCode::new(); + program + .bit_and(Source::Reg, Arch::X64) + .set_dst(0x03) + .set_src(0x02) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x5f, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_left_shift_from_register_to_register() { + let mut program = BpfCode::new(); + program + .left_shift(Source::Reg, Arch::X64) + .set_dst(0x02) + .set_src(0x03) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x6f, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_logical_right_shift_from_register_to_register() { + let mut program = BpfCode::new(); + program + .right_shift(Source::Reg, Arch::X64) + .set_dst(0x02) + .set_src(0x04) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x7f, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_mod_from_register_to_register() { + let mut program = BpfCode::new(); + program + .modulo(Source::Reg, Arch::X64) + .set_dst(0x01) + .set_src(0x02) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x9f, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] 
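+ // Register-source ALU64 opcodes set the BPF_X bit (0x08): XOR64 is 0xaf below, versus 0xa7 for the immediate form above.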
+ fn move_bit_xor_from_register_to_register() { + let mut program = BpfCode::new(); + program + .bit_xor(Source::Reg, Arch::X64) + .set_dst(0x02) + .set_src(0x04) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xaf, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_from_register_to_another_register() { + let mut program = BpfCode::new(); + program.mov(Source::Reg, Arch::X64).set_src(0x01).push(); + + assert_eq!( + program.into_bytes(), + &[0xbf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_signed_right_shift_from_register_to_register() { + let mut program = BpfCode::new(); + program + .signed_right_shift(Source::Reg, Arch::X64) + .set_dst(0x02) + .set_src(0x03) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xcf, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + } + } + + #[cfg(test)] + mod arch_x32 { + #[cfg(test)] + mod immediate { + use super::super::super::super::*; + + #[test] + fn move_and_add_const_to_register() { + let mut program = BpfCode::new(); + program + .add(Source::Imm, Arch::X32) + .set_dst(0x02) + .set_imm(0x01_02_03_04) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x04, 0x02, 0x00, 0x00, 0x04, 0x03, 0x02, 0x01] + ); + } + + #[test] + fn move_sub_const_to_register() { + let mut program = BpfCode::new(); + program + .sub(Source::Imm, Arch::X32) + .set_dst(0x04) + .set_imm(0x00_01_02_03) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x14, 0x04, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00] + ); + } + + #[test] + fn move_mul_const_to_register() { + let mut program = BpfCode::new(); + program + .mul(Source::Imm, Arch::X32) + .set_dst(0x05) + .set_imm(0x04_03_02_01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x24, 0x05, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04] + ); + } + + #[test] + fn move_div_constant_to_register() { + let mut program = BpfCode::new(); + program + .div(Source::Imm, Arch::X32) + .set_dst(0x02) + .set_imm(0x00_ff_00_ff) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x34, 0x02, 0x00, 0x00, 0xff, 0x00, 0xff, 0x00] + ); + } + + #[test] + fn move_bit_or_const_to_register() { + let mut program = BpfCode::new(); + program + .bit_or(Source::Imm, Arch::X32) + .set_dst(0x02) + .set_imm(0x00_11_00_22) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x44, 0x02, 0x00, 0x00, 0x22, 0x00, 0x11, 0x00] + ); + } + + #[test] + fn move_bit_and_const_to_register() { + let mut program = BpfCode::new(); + program + .bit_and(Source::Imm, Arch::X32) + .set_dst(0x02) + .set_imm(0x11_22_33_44) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x54, 0x02, 0x00, 0x00, 0x44, 0x33, 0x22, 0x11] + ); + } + + #[test] + fn move_left_shift_const_to_register() { + let mut program = BpfCode::new(); + program + .left_shift(Source::Imm, Arch::X32) + .set_dst(0x01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_logical_right_shift_const_to_register() { + let mut program = BpfCode::new(); + program + .right_shift(Source::Imm, Arch::X32) + .set_dst(0x01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x74, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_negate_register() { + let mut program = BpfCode::new(); + program.negate(Arch::X32).set_dst(0x02).push(); + + assert_eq!( + program.into_bytes(), + &[0x84, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_mod_const_to_register() { + let mut program = BpfCode::new(); + program.modulo(Source::Imm, 
Arch::X32).set_dst(0x02).push(); + + assert_eq!( + program.into_bytes(), + &[0x94, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_bit_xor_const_to_register() { + let mut program = BpfCode::new(); + program.bit_xor(Source::Imm, Arch::X32).set_dst(0x03).push(); + + assert_eq!( + program.into_bytes(), + &[0xa4, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_const_to_register() { + let mut program = BpfCode::new(); + program + .mov(Source::Imm, Arch::X32) + .set_dst(0x01) + .set_imm(0x00_00_00_FF) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xb4, 0x01, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_signed_right_shift_const_to_register() { + let mut program = BpfCode::new(); + program + .signed_right_shift(Source::Imm, Arch::X32) + .set_dst(0x05) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xc4, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + } + + #[cfg(test)] + mod register { + use super::super::super::super::*; + + #[test] + fn move_and_add_from_register() { + let mut program = BpfCode::new(); + program + .add(Source::Reg, Arch::X32) + .set_dst(0x03) + .set_src(0x02) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x0c, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_sub_from_register_to_register() { + let mut program = BpfCode::new(); + program + .sub(Source::Reg, Arch::X32) + .set_dst(0x03) + .set_src(0x04) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x1c, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_mul_from_register_to_register() { + let mut program = BpfCode::new(); + program + .mul(Source::Reg, Arch::X32) + .set_dst(0x04) + .set_src(0x03) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x2c, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_div_from_register_to_register() { + let mut program = BpfCode::new(); + program + .div(Source::Reg, Arch::X32) + .set_dst(0x01) + .set_src(0x00) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x3c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_bit_or_from_register_to_register() { + let mut program = BpfCode::new(); + program + .bit_or(Source::Reg, Arch::X32) + .set_dst(0x03) + .set_src(0x01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x4c, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_bit_and_from_register_to_register() { + let mut program = BpfCode::new(); + program + .bit_and(Source::Reg, Arch::X32) + .set_dst(0x03) + .set_src(0x02) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x5c, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_left_shift_from_register_to_register() { + let mut program = BpfCode::new(); + program + .left_shift(Source::Reg, Arch::X32) + .set_dst(0x02) + .set_src(0x03) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x6c, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_logical_right_shift_from_register_to_register() { + let mut program = BpfCode::new(); + program + .right_shift(Source::Reg, Arch::X32) + .set_dst(0x02) + .set_src(0x04) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x7c, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_mod_from_register_to_register() { + let mut program = BpfCode::new(); + program + .modulo(Source::Reg, Arch::X32) + .set_dst(0x01) + .set_src(0x02) + .push(); + + assert_eq!( + program.into_bytes(), + &[0x9c, 0x21, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_bit_xor_from_register_to_register() { + let mut program = BpfCode::new(); + program + .bit_xor(Source::Reg, Arch::X32) + .set_dst(0x02) + .set_src(0x04) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xac, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_from_register_to_another_register() { + let mut program = BpfCode::new(); + program + .mov(Source::Reg, Arch::X32) + .set_dst(0x00) + .set_src(0x01) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xbc, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + + #[test] + fn move_signed_right_shift_from_register_to_register() { + let mut program = BpfCode::new(); + program + .signed_right_shift(Source::Reg, Arch::X32) + .set_dst(0x02) + .set_src(0x03) + .push(); + + assert_eq!( + program.into_bytes(), + &[0xcc, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] + ); + } + } + } + } + + #[cfg(test)] + mod programs { + use super::super::*; + + #[test] + fn example_from_assembler() { + let mut program = BpfCode::new(); + program + .add(Source::Imm, Arch::X64) + .set_dst(1) + .set_imm(0x605) + .push() + .mov(Source::Imm, Arch::X64) + .set_dst(2) + .set_imm(0x32) + .push() + .mov(Source::Reg, Arch::X64) + .set_src(0) + .set_dst(1) + .push() + .swap_bytes(Endian::Big) + .set_dst(0) + .set_imm(0x10) + .push() + .negate(Arch::X64) + .set_dst(2) + .push() + .exit() + .push(); + + let bytecode = program.into_bytes(); + let ref_prog = &[ + 0x07, 0x01, 0x00, 0x00, 0x05, 0x06, 0x00, 0x00, 0xb7, 0x02, 0x00, 0x00, 0x32, 0x00, + 0x00, 0x00, 0xbf, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x00, 0x00, 0x87, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + // cargo says: "`[{integer}; 48]` cannot be formatted using `{:?}` + // because it doesn't implement `std::fmt::Debug`" + // So let's check in two steps. + assert_eq!(bytecode[..32], ref_prog[..32]); + assert_eq!(bytecode[32..], ref_prog[32..]); + } + } +} diff --git a/rbpf/src/interpreter.rs b/rbpf/src/interpreter.rs new file mode 100644 index 00000000000000..fc72b520a63cb2 --- /dev/null +++ b/rbpf/src/interpreter.rs @@ -0,0 +1,535 @@ +#![allow(clippy::arithmetic_side_effects)] +// Derived from uBPF +// Copyright 2015 Big Switch Networks, Inc +// (uBPF: VM architecture, parts of the interpreter, originally in C) +// Copyright 2016 6WIND S.A. +// (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for syscalls) +// Copyright 2020 Solana Maintainers +// +// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or +// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +//! Interpreter for eBPF programs. + +use crate::{ + ebpf::{self, STACK_PTR_REG}, + elf::Executable, + error::{EbpfError, ProgramResult}, + vm::{Config, ContextObject, EbpfVm}, +}; + +/// Virtual memory operation helper. +macro_rules! 
translate_memory_access { + (_impl, $self:ident, $op:ident, $vm_addr:ident, $T:ty, $($rest:expr),*) => { + match $self.vm.memory_mapping.$op::<$T>( + $($rest,)* + $vm_addr, + ) { + ProgramResult::Ok(v) => v, + ProgramResult::Err(err) => { + throw_error!($self, err); + }, + } + }; + + // MemoryMapping::load() + ($self:ident, load, $vm_addr:ident, $T:ty) => { + translate_memory_access!(_impl, $self, load, $vm_addr, $T,) + }; + + // MemoryMapping::store() + ($self:ident, store, $value:expr, $vm_addr:ident, $T:ty) => { + translate_memory_access!(_impl, $self, store, $vm_addr, $T, ($value) as $T); + }; +} + +macro_rules! throw_error { + ($self:expr, $err:expr) => {{ + $self.vm.registers[11] = $self.reg[11]; + $self.vm.program_result = ProgramResult::Err($err); + return false; + }}; + (DivideByZero; $self:expr, $src:expr, $ty:ty) => { + if $src as $ty == 0 { + throw_error!($self, EbpfError::DivideByZero); + } + }; + (DivideOverflow; $self:expr, $src:expr, $dst:expr, $ty:ty) => { + if $dst as $ty == <$ty>::MIN && $src as $ty == -1 { + throw_error!($self, EbpfError::DivideOverflow); + } + }; +} + +macro_rules! check_pc { + ($self:expr, $next_pc:ident, $target_pc:expr) => { + if ($target_pc as usize) + .checked_mul(ebpf::INSN_SIZE) + .and_then(|offset| $self.program.get(offset..offset + ebpf::INSN_SIZE)) + .is_some() + { + $next_pc = $target_pc; + } else { + throw_error!($self, EbpfError::CallOutsideTextSegment); + } + }; +} + +/// State of the interpreter during a debugging session +#[cfg(feature = "debugger")] +pub enum DebugState { + /// Single step the interpreter + Step, + /// Continue execution till the end or till a breakpoint is hit + Continue, +} + +/// State of an interpreter +pub struct Interpreter<'a, 'b, C: ContextObject> { + pub(crate) vm: &'a mut EbpfVm<'b, C>, + pub(crate) executable: &'a Executable, + pub(crate) program: &'a [u8], + pub(crate) program_vm_addr: u64, + + /// General purpose registers and pc + pub reg: [u64; 12], + + #[cfg(feature = "debugger")] + pub(crate) debug_state: DebugState, + #[cfg(feature = "debugger")] + pub(crate) breakpoints: Vec, +} + +impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> { + /// Creates a new interpreter state + pub fn new( + vm: &'a mut EbpfVm<'b, C>, + executable: &'a Executable, + registers: [u64; 12], + ) -> Self { + let (program_vm_addr, program) = executable.get_text_bytes(); + Self { + vm, + executable, + program, + program_vm_addr, + reg: registers, + #[cfg(feature = "debugger")] + debug_state: DebugState::Continue, + #[cfg(feature = "debugger")] + breakpoints: Vec::new(), + } + } + + /// Translate between the virtual machines' pc value and the pc value used by the debugger + #[cfg(feature = "debugger")] + pub fn get_dbg_pc(&self) -> u64 { + (self.reg[11] * ebpf::INSN_SIZE as u64) + self.executable.get_text_section_offset() + } + + fn push_frame(&mut self, config: &Config) -> bool { + let frame = &mut self.vm.call_frames[self.vm.call_depth as usize]; + frame.caller_saved_registers.copy_from_slice( + &self.reg[ebpf::FIRST_SCRATCH_REG..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS], + ); + frame.frame_pointer = self.reg[ebpf::FRAME_PTR_REG]; + frame.target_pc = self.reg[11] + 1; + + self.vm.call_depth += 1; + if self.vm.call_depth as usize == config.max_call_depth { + throw_error!(self, EbpfError::CallDepthExceeded); + } + + if !self.executable.get_sbpf_version().dynamic_stack_frames() { + // With fixed frames we start the new frame at the next fixed offset + let stack_frame_size = + config.stack_frame_size * if 
config.enable_stack_frame_gaps { 2 } else { 1 }; + self.vm.stack_pointer += stack_frame_size as u64; + } + self.reg[ebpf::FRAME_PTR_REG] = self.vm.stack_pointer; + + true + } + + /// Advances the interpreter state by one instruction + /// + /// Returns false if the program terminated or threw an error. + #[rustfmt::skip] + pub fn step(&mut self) -> bool { + let config = &self.executable.get_config(); + + self.vm.due_insn_count += 1; + let mut next_pc = self.reg[11] + 1; + if next_pc as usize * ebpf::INSN_SIZE > self.program.len() { + throw_error!(self, EbpfError::ExecutionOverrun); + } + let mut insn = ebpf::get_insn_unchecked(self.program, self.reg[11] as usize); + let dst = insn.dst as usize; + let src = insn.src as usize; + + if config.enable_instruction_tracing { + self.vm.context_object_pointer.trace(self.reg); + } + + match insn.opc { + ebpf::ADD64_IMM if dst == STACK_PTR_REG && self.executable.get_sbpf_version().dynamic_stack_frames() => { + // Let the stack overflow. For legitimate programs, this is a nearly + // impossible condition to hit since programs are metered and we already + // enforce a maximum call depth. For programs that intentionally mess + // around with the stack pointer, MemoryRegion::map will return + // InvalidVirtualAddress(stack_ptr) once an invalid stack address is + // accessed. + self.vm.stack_pointer = self.vm.stack_pointer.overflowing_add(insn.imm as u64).0; + } + + ebpf::LD_DW_IMM => { + ebpf::augment_lddw_unchecked(self.program, &mut insn); + self.reg[dst] = insn.imm as u64; + self.reg[11] += 1; + next_pc += 1; + }, + + // BPF_LDX class + ebpf::LD_B_REG => { + let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64; + self.reg[dst] = translate_memory_access!(self, load, vm_addr, u8); + }, + ebpf::LD_H_REG => { + let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64; + self.reg[dst] = translate_memory_access!(self, load, vm_addr, u16); + }, + ebpf::LD_W_REG => { + let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64; + self.reg[dst] = translate_memory_access!(self, load, vm_addr, u32); + }, + ebpf::LD_DW_REG => { + let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64; + self.reg[dst] = translate_memory_access!(self, load, vm_addr, u64); + }, + + // BPF_ST class + ebpf::ST_B_IMM => { + let vm_addr = (self.reg[dst] as i64).wrapping_add( insn.off as i64) as u64; + translate_memory_access!(self, store, insn.imm, vm_addr, u8); + }, + ebpf::ST_H_IMM => { + let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64; + translate_memory_access!(self, store, insn.imm, vm_addr, u16); + }, + ebpf::ST_W_IMM => { + let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64; + translate_memory_access!(self, store, insn.imm, vm_addr, u32); + }, + ebpf::ST_DW_IMM => { + let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64; + translate_memory_access!(self, store, insn.imm, vm_addr, u64); + }, + + // BPF_STX class + ebpf::ST_B_REG => { + let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64; + translate_memory_access!(self, store, self.reg[src], vm_addr, u8); + }, + ebpf::ST_H_REG => { + let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64; + translate_memory_access!(self, store, self.reg[src], vm_addr, u16); + }, + ebpf::ST_W_REG => { + let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64; + translate_memory_access!(self, store, self.reg[src], vm_addr, u32); + }, + ebpf::ST_DW_REG => 
{ + let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64; + translate_memory_access!(self, store, self.reg[src], vm_addr, u64); + }, + + // BPF_ALU class + ebpf::ADD32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(insn.imm as i32) as u64, + ebpf::ADD32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(self.reg[src] as i32) as u64, + ebpf::SUB32_IMM => if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() { + self.reg[dst] = (insn.imm as i32).wrapping_sub(self.reg[dst] as i32) as u64 + } else { + self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(insn.imm as i32) as u64 + }, + ebpf::SUB32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(self.reg[src] as i32) as u64, + ebpf::MUL32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32) as u64, + ebpf::MUL32_REG if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64, + ebpf::DIV32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32 / insn.imm as u32) as u64, + ebpf::DIV32_REG if !self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideByZero; self, self.reg[src], u32); + self.reg[dst] = (self.reg[dst] as u32 / self.reg[src] as u32) as u64; + }, + ebpf::OR32_IMM => self.reg[dst] = (self.reg[dst] as u32 | insn.imm as u32) as u64, + ebpf::OR32_REG => self.reg[dst] = (self.reg[dst] as u32 | self.reg[src] as u32) as u64, + ebpf::AND32_IMM => self.reg[dst] = (self.reg[dst] as u32 & insn.imm as u32) as u64, + ebpf::AND32_REG => self.reg[dst] = (self.reg[dst] as u32 & self.reg[src] as u32) as u64, + ebpf::LSH32_IMM => self.reg[dst] = (self.reg[dst] as u32).wrapping_shl(insn.imm as u32) as u64, + ebpf::LSH32_REG => self.reg[dst] = (self.reg[dst] as u32).wrapping_shl(self.reg[src] as u32) as u64, + ebpf::RSH32_IMM => self.reg[dst] = (self.reg[dst] as u32).wrapping_shr(insn.imm as u32) as u64, + ebpf::RSH32_REG => self.reg[dst] = (self.reg[dst] as u32).wrapping_shr(self.reg[src] as u32) as u64, + ebpf::NEG32 if self.executable.get_sbpf_version().enable_neg() => self.reg[dst] = (self.reg[dst] as i32).wrapping_neg() as u64 & (u32::MAX as u64), + ebpf::MOD32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32 % insn.imm as u32) as u64, + ebpf::MOD32_REG if !self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideByZero; self, self.reg[src], u32); + self.reg[dst] = (self.reg[dst] as u32 % self.reg[src] as u32) as u64; + }, + ebpf::XOR32_IMM => self.reg[dst] = (self.reg[dst] as u32 ^ insn.imm as u32) as u64, + ebpf::XOR32_REG => self.reg[dst] = (self.reg[dst] as u32 ^ self.reg[src] as u32) as u64, + ebpf::MOV32_IMM => self.reg[dst] = insn.imm as u32 as u64, + ebpf::MOV32_REG => self.reg[dst] = (self.reg[src] as u32) as u64, + ebpf::ARSH32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(insn.imm as u32) as u64 & (u32::MAX as u64), + ebpf::ARSH32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(self.reg[src] as u32) as u64 & (u32::MAX as u64), + ebpf::LE if self.executable.get_sbpf_version().enable_le() => { + self.reg[dst] = match insn.imm { + 16 => (self.reg[dst] as u16).to_le() as u64, + 32 => (self.reg[dst] as u32).to_le() as u64, + 64 => self.reg[dst].to_le(), + _ => { + throw_error!(self, EbpfError::InvalidInstruction); + } + }; + }, + ebpf::BE => { + self.reg[dst] = match insn.imm { + 16 => 
(self.reg[dst] as u16).to_be() as u64, + 32 => (self.reg[dst] as u32).to_be() as u64, + 64 => self.reg[dst].to_be(), + _ => { + throw_error!(self, EbpfError::InvalidInstruction); + } + }; + }, + + // BPF_ALU64 class + ebpf::ADD64_IMM => self.reg[dst] = self.reg[dst].wrapping_add(insn.imm as u64), + ebpf::ADD64_REG => self.reg[dst] = self.reg[dst].wrapping_add(self.reg[src]), + ebpf::SUB64_IMM => if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() { + self.reg[dst] = (insn.imm as u64).wrapping_sub(self.reg[dst]) + } else { + self.reg[dst] = self.reg[dst].wrapping_sub(insn.imm as u64) + }, + ebpf::SUB64_REG => self.reg[dst] = self.reg[dst].wrapping_sub(self.reg[src]), + ebpf::MUL64_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(insn.imm as u64), + ebpf::MUL64_REG if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(self.reg[src]), + ebpf::DIV64_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] /= insn.imm as u64, + ebpf::DIV64_REG if !self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideByZero; self, self.reg[src], u64); + self.reg[dst] /= self.reg[src]; + }, + ebpf::OR64_IMM => self.reg[dst] |= insn.imm as u64, + ebpf::OR64_REG => self.reg[dst] |= self.reg[src], + ebpf::AND64_IMM => self.reg[dst] &= insn.imm as u64, + ebpf::AND64_REG => self.reg[dst] &= self.reg[src], + ebpf::LSH64_IMM => self.reg[dst] = self.reg[dst].wrapping_shl(insn.imm as u32), + ebpf::LSH64_REG => self.reg[dst] = self.reg[dst].wrapping_shl(self.reg[src] as u32), + ebpf::RSH64_IMM => self.reg[dst] = self.reg[dst].wrapping_shr(insn.imm as u32), + ebpf::RSH64_REG => self.reg[dst] = self.reg[dst].wrapping_shr(self.reg[src] as u32), + ebpf::NEG64 if self.executable.get_sbpf_version().enable_neg() => self.reg[dst] = (self.reg[dst] as i64).wrapping_neg() as u64, + ebpf::MOD64_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] %= insn.imm as u64, + ebpf::MOD64_REG if !self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideByZero; self, self.reg[src], u64); + self.reg[dst] %= self.reg[src]; + }, + ebpf::XOR64_IMM => self.reg[dst] ^= insn.imm as u64, + ebpf::XOR64_REG => self.reg[dst] ^= self.reg[src], + ebpf::MOV64_IMM => self.reg[dst] = insn.imm as u64, + ebpf::MOV64_REG => self.reg[dst] = self.reg[src], + ebpf::ARSH64_IMM => self.reg[dst] = (self.reg[dst] as i64).wrapping_shr(insn.imm as u32) as u64, + ebpf::ARSH64_REG => self.reg[dst] = (self.reg[dst] as i64).wrapping_shr(self.reg[src] as u32) as u64, + ebpf::HOR64_IMM if self.executable.get_sbpf_version().disable_lddw() => { + self.reg[dst] |= (insn.imm as u64).wrapping_shl(32); + } + + // BPF_PQR class + ebpf::LMUL32_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32) as u64, + ebpf::LMUL32_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64, + ebpf::LMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(insn.imm as u64), + ebpf::LMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(self.reg[src]), + ebpf::UHMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u128).wrapping_mul(insn.imm as u64 as u128).wrapping_shr(64) as u64, + ebpf::UHMUL64_REG if 
self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u128).wrapping_mul(self.reg[src] as u128).wrapping_shr(64) as u64, + ebpf::SHMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i64 as i128).wrapping_mul(insn.imm as i128).wrapping_shr(64) as u64, + ebpf::SHMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i64 as i128).wrapping_mul(self.reg[src] as i64 as i128).wrapping_shr(64) as u64, + ebpf::UDIV32_IMM if self.executable.get_sbpf_version().enable_pqr() => { + self.reg[dst] = (self.reg[dst] as u32 / insn.imm as u32) as u64; + } + ebpf::UDIV32_REG if self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideByZero; self, self.reg[src], u32); + self.reg[dst] = (self.reg[dst] as u32 / self.reg[src] as u32) as u64; + }, + ebpf::UDIV64_IMM if self.executable.get_sbpf_version().enable_pqr() => { + self.reg[dst] /= insn.imm as u64; + } + ebpf::UDIV64_REG if self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideByZero; self, self.reg[src], u64); + self.reg[dst] /= self.reg[src]; + }, + ebpf::UREM32_IMM if self.executable.get_sbpf_version().enable_pqr() => { + self.reg[dst] = (self.reg[dst] as u32 % insn.imm as u32) as u64; + } + ebpf::UREM32_REG if self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideByZero; self, self.reg[src], u32); + self.reg[dst] = (self.reg[dst] as u32 % self.reg[src] as u32) as u64; + }, + ebpf::UREM64_IMM if self.executable.get_sbpf_version().enable_pqr() => { + self.reg[dst] %= insn.imm as u64; + } + ebpf::UREM64_REG if self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideByZero; self, self.reg[src], u64); + self.reg[dst] %= self.reg[src]; + }, + ebpf::SDIV32_IMM if self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i32); + self.reg[dst] = (self.reg[dst] as i32 / insn.imm as i32) as u64; + } + ebpf::SDIV32_REG if self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideByZero; self, self.reg[src], i32); + throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i32); + self.reg[dst] = (self.reg[dst] as i32 / self.reg[src] as i32) as u64; + }, + ebpf::SDIV64_IMM if self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i64); + self.reg[dst] = (self.reg[dst] as i64 / insn.imm) as u64; + } + ebpf::SDIV64_REG if self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideByZero; self, self.reg[src], i64); + throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i64); + self.reg[dst] = (self.reg[dst] as i64 / self.reg[src] as i64) as u64; + }, + ebpf::SREM32_IMM if self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i32); + self.reg[dst] = (self.reg[dst] as i32 % insn.imm as i32) as u64; + } + ebpf::SREM32_REG if self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideByZero; self, self.reg[src], i32); + throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i32); + self.reg[dst] = (self.reg[dst] as i32 % self.reg[src] as i32) as u64; + }, + ebpf::SREM64_IMM if self.executable.get_sbpf_version().enable_pqr() => { + throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i64); + self.reg[dst] = (self.reg[dst] as i64 % insn.imm) as u64; + } + ebpf::SREM64_REG if self.executable.get_sbpf_version().enable_pqr() => 
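+ // As in the other signed PQR arms: reject a zero divisor and the MIN / -1 + // overflow case before dividing.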
{ + throw_error!(DivideByZero; self, self.reg[src], i64); + throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i64); + self.reg[dst] = (self.reg[dst] as i64 % self.reg[src] as i64) as u64; + }, + + // BPF_JMP class + ebpf::JA => { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JEQ_IMM => if self.reg[dst] == insn.imm as u64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JEQ_REG => if self.reg[dst] == self.reg[src] { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JGT_IMM => if self.reg[dst] > insn.imm as u64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JGT_REG => if self.reg[dst] > self.reg[src] { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JGE_IMM => if self.reg[dst] >= insn.imm as u64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JGE_REG => if self.reg[dst] >= self.reg[src] { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JLT_IMM => if self.reg[dst] < insn.imm as u64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JLT_REG => if self.reg[dst] < self.reg[src] { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JLE_IMM => if self.reg[dst] <= insn.imm as u64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JLE_REG => if self.reg[dst] <= self.reg[src] { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JSET_IMM => if self.reg[dst] & insn.imm as u64 != 0 { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JSET_REG => if self.reg[dst] & self.reg[src] != 0 { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JNE_IMM => if self.reg[dst] != insn.imm as u64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JNE_REG => if self.reg[dst] != self.reg[src] { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JSGT_IMM => if (self.reg[dst] as i64) > insn.imm { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JSGT_REG => if (self.reg[dst] as i64) > self.reg[src] as i64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JSGE_IMM => if (self.reg[dst] as i64) >= insn.imm { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JSGE_REG => if (self.reg[dst] as i64) >= self.reg[src] as i64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JSLT_IMM => if (self.reg[dst] as i64) < insn.imm { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JSLT_REG => if (self.reg[dst] as i64) < self.reg[src] as i64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JSLE_IMM => if (self.reg[dst] as i64) <= insn.imm { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + ebpf::JSLE_REG => if (self.reg[dst] as i64) <= self.reg[src] as i64 { next_pc = (next_pc as i64 + insn.off as i64) as u64; }, + + ebpf::CALL_REG => { + let target_pc = if self.executable.get_sbpf_version().callx_uses_src_reg() { + self.reg[src] + } else { + self.reg[insn.imm as usize] + }; + if !self.push_frame(config) { + return false; + } + check_pc!(self, next_pc, target_pc.wrapping_sub(self.program_vm_addr) / ebpf::INSN_SIZE as u64); + if self.executable.get_sbpf_version().static_syscalls() && self.executable.get_function_registry().lookup_by_key(next_pc as u32).is_none() { + self.vm.due_insn_count += 1; + self.reg[11] = next_pc; + throw_error!(self, EbpfError::UnsupportedInstruction); + } + }, + + // Do not delegate the check to the verifier, since self.registered functions can be + // changed after the program has 
been verified. + ebpf::CALL_IMM => { + let mut resolved = false; + let (external, internal) = if self.executable.get_sbpf_version().static_syscalls() { + (insn.src == 0, insn.src != 0) + } else { + (true, true) + }; + + if external { + if let Some((_function_name, function)) = self.executable.get_loader().get_function_registry().lookup_by_key(insn.imm as u32) { + resolved = true; + + self.vm.due_insn_count = self.vm.previous_instruction_meter - self.vm.due_insn_count; + self.vm.registers[0..6].copy_from_slice(&self.reg[0..6]); + self.vm.invoke_function(function); + self.vm.due_insn_count = 0; + self.reg[0] = match &self.vm.program_result { + ProgramResult::Ok(value) => *value, + ProgramResult::Err(_err) => return false, + }; + } + } + + if internal && !resolved { + if let Some((_function_name, target_pc)) = self.executable.get_function_registry().lookup_by_key(insn.imm as u32) { + resolved = true; + + // make BPF to BPF call + if !self.push_frame(config) { + return false; + } + check_pc!(self, next_pc, target_pc as u64); + } + } + + if !resolved { + throw_error!(self, EbpfError::UnsupportedInstruction); + } + } + + ebpf::EXIT => { + if self.vm.call_depth == 0 { + if config.enable_instruction_meter && self.vm.due_insn_count > self.vm.previous_instruction_meter { + throw_error!(self, EbpfError::ExceededMaxInstructions); + } + self.vm.program_result = ProgramResult::Ok(self.reg[0]); + return false; + } + // Return from BPF to BPF call + self.vm.call_depth -= 1; + let frame = &self.vm.call_frames[self.vm.call_depth as usize]; + self.reg[ebpf::FRAME_PTR_REG] = frame.frame_pointer; + self.reg[ebpf::FIRST_SCRATCH_REG + ..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS] + .copy_from_slice(&frame.caller_saved_registers); + if !self.executable.get_sbpf_version().dynamic_stack_frames() { + let stack_frame_size = + config.stack_frame_size * if config.enable_stack_frame_gaps { 2 } else { 1 }; + self.vm.stack_pointer -= stack_frame_size as u64; + } + check_pc!(self, next_pc, frame.target_pc); + } + _ => throw_error!(self, EbpfError::UnsupportedInstruction), + } + + if config.enable_instruction_meter && self.vm.due_insn_count >= self.vm.previous_instruction_meter { + self.reg[11] += 1; + throw_error!(self, EbpfError::ExceededMaxInstructions); + } + + self.reg[11] = next_pc; + true + } +} diff --git a/rbpf/src/jit.rs b/rbpf/src/jit.rs new file mode 100644 index 00000000000000..b4bd6f91828972 --- /dev/null +++ b/rbpf/src/jit.rs @@ -0,0 +1,1730 @@ +#![allow(clippy::arithmetic_side_effects)] +// Derived from uBPF +// Copyright 2015 Big Switch Networks, Inc +// (uBPF: JIT algorithm, originally in C) +// Copyright 2016 6WIND S.A. +// (Translation to Rust, MetaBuff addition) +// Copyright 2020 Solana Maintainers +// +// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or +// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be +// copied, modified, or distributed except according to those terms. 
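+//! Just-in-time (JIT) compiler for eBPF programs, targeting x86_64.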
+ +use rand::{rngs::SmallRng, Rng, SeedableRng}; +use std::{fmt::Debug, mem, ptr}; + +use crate::{ + ebpf::{self, FIRST_SCRATCH_REG, FRAME_PTR_REG, INSN_SIZE, SCRATCH_REGS, STACK_PTR_REG}, + elf::Executable, + error::{EbpfError, ProgramResult}, + memory_management::{ + allocate_pages, free_pages, get_system_page_size, protect_pages, round_to_page_size, + }, + memory_region::{AccessType, MemoryMapping}, + vm::{get_runtime_environment_key, Config, ContextObject, EbpfVm}, + x86::*, +}; + +const MAX_EMPTY_PROGRAM_MACHINE_CODE_LENGTH: usize = 4096; +const MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION: usize = 110; +const MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT: usize = 13; + +pub struct JitProgram { + /// OS page size in bytes and the alignment of the sections + page_size: usize, + /// A `*const u8` pointer into the text_section for each BPF instruction + pc_section: &'static mut [usize], + /// The x86 machine code + text_section: &'static mut [u8], +} + +impl JitProgram { + fn new(pc: usize, code_size: usize) -> Result<Self, EbpfError> { + let page_size = get_system_page_size(); + let pc_loc_table_size = round_to_page_size(pc * 8, page_size); + let over_allocated_code_size = round_to_page_size(code_size, page_size); + unsafe { + let raw = allocate_pages(pc_loc_table_size + over_allocated_code_size)?; + Ok(Self { + page_size, + pc_section: std::slice::from_raw_parts_mut(raw as *mut usize, pc), + text_section: std::slice::from_raw_parts_mut( + raw.add(pc_loc_table_size), + over_allocated_code_size, + ), + }) + } + } + + fn seal(&mut self, text_section_usage: usize) -> Result<(), EbpfError> { + if self.page_size == 0 { + return Ok(()); + } + let raw = self.pc_section.as_ptr() as *mut u8; + let pc_loc_table_size = round_to_page_size(self.pc_section.len() * 8, self.page_size); + let over_allocated_code_size = round_to_page_size(self.text_section.len(), self.page_size); + let code_size = round_to_page_size(text_section_usage, self.page_size); + unsafe { + // Fill with debugger traps + std::ptr::write_bytes( + raw.add(pc_loc_table_size).add(text_section_usage), + 0xcc, + code_size - text_section_usage, + ); + if over_allocated_code_size > code_size { + free_pages( + raw.add(pc_loc_table_size).add(code_size), + over_allocated_code_size - code_size, + )?; + } + self.text_section = + std::slice::from_raw_parts_mut(raw.add(pc_loc_table_size), text_section_usage); + protect_pages( + self.pc_section.as_mut_ptr() as *mut u8, + pc_loc_table_size, + false, + )?; + protect_pages(self.text_section.as_mut_ptr(), code_size, true)?; + } + Ok(()) + } + + pub fn invoke<C: ContextObject>( + &self, + _config: &Config, + vm: &mut EbpfVm<C>, + registers: [u64; 12], + ) { + unsafe { + std::arch::asm!( + // RBP and RBX must be saved and restored manually in the current version of rustc and llvm.
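+ // Handoff (see the operand bindings below): r11 points at the guest register + // file, rax carries the instruction meter limit, r10 the host address of the + // translated entry point, and rdi the runtime environment pointer.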
+ "push rbx", + "push rbp", + "mov [{host_stack_pointer}], rsp", + "add QWORD PTR [{host_stack_pointer}], -8", // We will push RIP in "call r10" later + "mov rbx, rax", + "mov rax, [r11 + 0x00]", + "mov rsi, [r11 + 0x08]", + "mov rdx, [r11 + 0x10]", + "mov rcx, [r11 + 0x18]", + "mov r8, [r11 + 0x20]", + "mov r9, [r11 + 0x28]", + "mov r12, [r11 + 0x30]", + "mov r13, [r11 + 0x38]", + "mov r14, [r11 + 0x40]", + "mov r15, [r11 + 0x48]", + "mov rbp, [r11 + 0x50]", + "mov r11, [r11 + 0x58]", + "call r10", + "pop rbp", + "pop rbx", + host_stack_pointer = in(reg) &mut vm.host_stack_pointer, + inlateout("rdi") (vm as *mut _ as *mut u64).offset(get_runtime_environment_key() as isize) => _, + inlateout("rax") (vm.previous_instruction_meter as i64).wrapping_add(registers[11] as i64) => _, + inlateout("r10") self.pc_section[registers[11] as usize] => _, + inlateout("r11") ®isters => _, + lateout("rsi") _, lateout("rdx") _, lateout("rcx") _, lateout("r8") _, + lateout("r9") _, lateout("r12") _, lateout("r13") _, lateout("r14") _, lateout("r15") _, + // lateout("rbp") _, lateout("rbx") _, + ); + } + } + + pub fn machine_code_length(&self) -> usize { + self.text_section.len() + } + + pub fn mem_size(&self) -> usize { + let pc_loc_table_size = round_to_page_size(self.pc_section.len() * 8, self.page_size); + let code_size = round_to_page_size(self.text_section.len(), self.page_size); + pc_loc_table_size + code_size + } +} + +impl Drop for JitProgram { + fn drop(&mut self) { + let pc_loc_table_size = round_to_page_size(self.pc_section.len() * 8, self.page_size); + let code_size = round_to_page_size(self.text_section.len(), self.page_size); + if pc_loc_table_size + code_size > 0 { + unsafe { + let _ = free_pages( + self.pc_section.as_ptr() as *mut u8, + pc_loc_table_size + code_size, + ); + } + } + } +} + +impl Debug for JitProgram { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fmt.write_fmt(format_args!("JitProgram {:?}", self as *const _)) + } +} + +impl PartialEq for JitProgram { + fn eq(&self, other: &Self) -> bool { + std::ptr::eq(self as *const _, other as *const _) + } +} + +// Used to define subroutines and then call them +// See JitCompiler::set_anchor() and JitCompiler::relative_to_anchor() +const ANCHOR_TRACE: usize = 0; +const ANCHOR_THROW_EXCEEDED_MAX_INSTRUCTIONS: usize = 1; +const ANCHOR_EPILOGUE: usize = 2; +const ANCHOR_THROW_EXCEPTION_UNCHECKED: usize = 3; +const ANCHOR_EXIT: usize = 4; +const ANCHOR_THROW_EXCEPTION: usize = 5; +const ANCHOR_CALL_DEPTH_EXCEEDED: usize = 6; +const ANCHOR_CALL_OUTSIDE_TEXT_SEGMENT: usize = 7; +const ANCHOR_DIV_BY_ZERO: usize = 8; +const ANCHOR_DIV_OVERFLOW: usize = 9; +const ANCHOR_CALL_UNSUPPORTED_INSTRUCTION: usize = 10; +const ANCHOR_EXTERNAL_FUNCTION_CALL: usize = 11; +const ANCHOR_ANCHOR_INTERNAL_FUNCTION_CALL_PROLOGUE: usize = 12; +const ANCHOR_ANCHOR_INTERNAL_FUNCTION_CALL_REG: usize = 13; +const ANCHOR_TRANSLATE_MEMORY_ADDRESS: usize = 21; +const ANCHOR_COUNT: usize = 30; // Update me when adding or removing anchors + +const REGISTER_MAP: [u8; 11] = [ + CALLER_SAVED_REGISTERS[0], // RAX + ARGUMENT_REGISTERS[1], // RSI + ARGUMENT_REGISTERS[2], // RDX + ARGUMENT_REGISTERS[3], // RCX + ARGUMENT_REGISTERS[4], // R8 + ARGUMENT_REGISTERS[5], // R9 + CALLEE_SAVED_REGISTERS[2], // R12 + CALLEE_SAVED_REGISTERS[3], // R13 + CALLEE_SAVED_REGISTERS[4], // R14 + CALLEE_SAVED_REGISTERS[5], // R15 + CALLEE_SAVED_REGISTERS[0], // RBP +]; + +/// RDI: Used together with slot_in_vm() +const REGISTER_PTR_TO_VM: u8 = ARGUMENT_REGISTERS[0]; 
+/// RBX: Program counter limit +const REGISTER_INSTRUCTION_METER: u8 = CALLEE_SAVED_REGISTERS[1]; +/// R10: Other scratch register +const REGISTER_OTHER_SCRATCH: u8 = CALLER_SAVED_REGISTERS[7]; +/// R11: Scratch register +const REGISTER_SCRATCH: u8 = CALLER_SAVED_REGISTERS[8]; + +#[derive(Copy, Clone, Debug)] +pub enum OperandSize { + S0 = 0, + S8 = 8, + S16 = 16, + S32 = 32, + S64 = 64, +} + +enum Value { + Register(u8), + RegisterIndirect(u8, i32, bool), + RegisterPlusConstant32(u8, i32, bool), + RegisterPlusConstant64(u8, i64, bool), + Constant64(i64, bool), +} + +struct Argument { + index: usize, + value: Value, +} + +#[derive(Debug)] +struct Jump { + location: *const u8, + target_pc: usize, +} + +/// Indices of slots inside RuntimeEnvironment +enum RuntimeEnvironmentSlot { + HostStackPointer = 0, + CallDepth = 1, + StackPointer = 2, + ContextObjectPointer = 3, + PreviousInstructionMeter = 4, + DueInsnCount = 5, + StopwatchNumerator = 6, + StopwatchDenominator = 7, + Registers = 8, + ProgramResult = 20, + MemoryMapping = 28, +} + +/* Explanation of the Instruction Meter + + The instruction meter serves two purposes: First, measure how many BPF instructions are + executed (profiling) and second, limit this number by stopping the program with an exception + once a given threshold is reached (validation). One approach would be to increment and + validate the instruction meter before each instruction. However, this would heavily impact + performance. Thus, we only profile and validate the instruction meter at branches. + + For this, we implicitly sum up all the instructions between two branches. + It is easy to know the end of such a slice of instructions, but how do we know where it + started? There could be multiple ways to jump onto a path which all lead to the same final + branch. This is where the integral technique comes in. The program is basically a sequence + of instructions with the x-axis being the program counter (short "pc"). The cost function is + a constant function which returns one for every point on the x axis. Now, the instruction + meter needs to calculate the definite integral of the cost function between the start and the + end of the current slice of instructions. For that we need the indefinite integral of the cost + function. Fortunately, the derivative of the pc is the cost function (it increases by one for + every instruction), thus the pc is an antiderivative of the cost function and a valid + indefinite integral. So, to calculate a definite integral of the cost function, we just need + to subtract the start pc from the end pc of the slice. This difference can then be subtracted + from the remaining instruction counter until it goes below zero, at which point the + instruction meter limit is reached. Ok, but how do we know the start of the slice at the end? + + The trick is: We do not need to know. Since these additions and subtractions are just signed + sums, they can be reordered, even beyond the current branch. Thus, we can simply account for the + amount the start will subtract at the next branch by already adding that to the remaining + instruction counter at the current branch. So, every branch just subtracts its current pc + (the end of the slice) and adds the target pc (the start of the next slice) to the remaining + instruction counter. This way, no branch needs to know the pc of the last branch explicitly. 
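+ As an illustration (with made-up numbers): given a budget of 10 instructions and entry at + pc 0, the counter starts at 0 + 10 = 10. A branch at pc 4 that jumps to pc 7 subtracts the + pc just past the branch and adds the target: 10 - 5 + 7 = 12, which is again the start of + the next slice plus the remaining budget (7 + 5), since the five instructions at pc 0 to 4 + have now been paid for. +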
+    Another way to think about this trick is as follows: The remaining instruction counter now
+    measures what the maximum pc is, that we can reach with the remaining budget after the last
+    branch.
+
+    One problem is conditional branches. There are basically two ways to handle them: Either,
+    only do the profiling if the branch is taken, which requires two jumps (one for the profiling
+    and one to get to the target pc). Or, always profile it as if the jump to the target pc was
+    taken, but then behind the conditional branch, undo the profiling (as it was not taken). We
+    use the second method and the undo profiling is the same as the normal profiling, just with
+    reversed plus and minus signs.
+
+    Another special case to keep in mind is return instructions. They would require us to know
+    the return address (target pc), but in the JIT we already converted that to be a host address.
+    Of course, one could also save the BPF return address on the stack, but an even simpler
+    solution exists: Just count as if you were jumping to a specific target pc before the exit,
+    and then after returning use the undo profiling. The trick is that the undo profiling now
+    has the current pc which is the BPF return address. The virtual target pc we count towards
+    and undo again can be anything, so we just set it to zero.
+*/
+
+pub struct JitCompiler<'a, C: ContextObject> {
+    result: JitProgram,
+    text_section_jumps: Vec<Jump>,
+    anchors: [*const u8; ANCHOR_COUNT],
+    offset_in_text_section: usize,
+    executable: &'a Executable<C>,
+    program: &'a [u8],
+    program_vm_addr: u64,
+    config: &'a Config,
+    pc: usize,
+    last_instruction_meter_validation_pc: usize,
+    next_noop_insertion: u32,
+    runtime_environment_key: i32,
+    diversification_rng: SmallRng,
+    stopwatch_is_active: bool,
+}
+
+#[rustfmt::skip]
+impl<'a, C: ContextObject> JitCompiler<'a, C> {
+    /// Constructs a new compiler and allocates memory for the compilation output
+    pub fn new(executable: &'a Executable<C>) -> Result<Self, EbpfError> {
+        let config = executable.get_config();
+        let (program_vm_addr, program) = executable.get_text_bytes();
+
+        // Scan through program to find actual number of instructions
+        let mut pc = 0;
+        if executable.get_sbpf_version().disable_lddw() {
+            pc = program.len() / ebpf::INSN_SIZE;
+        } else {
+            while (pc + 1) * ebpf::INSN_SIZE <= program.len() {
+                let insn = ebpf::get_insn_unchecked(program, pc);
+                pc += match insn.opc {
+                    ebpf::LD_DW_IMM => 2,
+                    _ => 1,
+                };
+            }
+        }
+
+        let mut code_length_estimate = MAX_EMPTY_PROGRAM_MACHINE_CODE_LENGTH + MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION * pc;
+        if config.noop_instruction_rate != 0 {
+            code_length_estimate += code_length_estimate / config.noop_instruction_rate as usize;
+        }
+        if config.instruction_meter_checkpoint_distance != 0 {
+            code_length_estimate += pc / config.instruction_meter_checkpoint_distance * MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT;
+        }
+
+        let runtime_environment_key = get_runtime_environment_key();
+        let mut diversification_rng = SmallRng::from_rng(rand::thread_rng()).map_err(|_| EbpfError::JitNotCompiled)?;
+
+        Ok(Self {
+            result: JitProgram::new(pc, code_length_estimate)?,
+            text_section_jumps: vec![],
+            anchors: [std::ptr::null(); ANCHOR_COUNT],
+            offset_in_text_section: 0,
+            executable,
+            program_vm_addr,
+            program,
+            config,
+            pc: 0,
+            last_instruction_meter_validation_pc: 0,
+            next_noop_insertion: if config.noop_instruction_rate == 0 { u32::MAX } else { diversification_rng.gen_range(0..config.noop_instruction_rate * 2) },
+            runtime_environment_key,
+            diversification_rng,
+            stopwatch_is_active: false,
+        })
+    }
+
+    /// Compiles the given executable, consuming the compiler
+    pub fn compile(mut self) -> Result<JitProgram, EbpfError> {
+        let text_section_base = self.result.text_section.as_ptr();
+
+        self.emit_subroutines();
+
+        while self.pc * ebpf::INSN_SIZE < self.program.len() {
+            if self.offset_in_text_section + MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION > self.result.text_section.len() {
+                return Err(EbpfError::ExhaustedTextSegment(self.pc));
+            }
+            let mut insn = ebpf::get_insn_unchecked(self.program, self.pc);
+            self.result.pc_section[self.pc] = unsafe { text_section_base.add(self.offset_in_text_section) } as usize;
+
+            // Regular instruction meter checkpoints to prevent long linear runs from exceeding their budget
+            if self.last_instruction_meter_validation_pc + self.config.instruction_meter_checkpoint_distance <= self.pc {
+                self.emit_validate_instruction_count(true, Some(self.pc));
+            }
+
+            if self.config.enable_instruction_tracing {
+                self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, self.pc as i64));
+                self.emit_ins(X86Instruction::call_immediate(self.relative_to_anchor(ANCHOR_TRACE, 5)));
+                self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, 0));
+            }
+
+            let dst = if insn.dst == STACK_PTR_REG as u8 { u8::MAX } else { REGISTER_MAP[insn.dst as usize] };
+            let src = REGISTER_MAP[insn.src as usize];
+            let target_pc = (self.pc as isize + insn.off as isize + 1) as usize;
+
+            match insn.opc {
+                ebpf::ADD64_IMM if insn.dst == STACK_PTR_REG as u8 && self.executable.get_sbpf_version().dynamic_stack_frames() => {
+                    let stack_ptr_access = X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::StackPointer));
+                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_PTR_TO_VM, insn.imm, Some(stack_ptr_access)));
+                }
+
+                ebpf::LD_DW_IMM => {
+                    self.emit_validate_and_profile_instruction_count(true, Some(self.pc + 2));
+                    self.pc += 1;
+                    self.result.pc_section[self.pc] = self.anchors[ANCHOR_CALL_UNSUPPORTED_INSTRUCTION] as usize;
+                    ebpf::augment_lddw_unchecked(self.program, &mut insn);
+                    if self.should_sanitize_constant(insn.imm) {
+                        self.emit_sanitized_load_immediate(OperandSize::S64, dst, insn.imm);
+                    } else {
+                        self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, dst, insn.imm));
+                    }
+                },
+
+                // BPF_LDX class
+                ebpf::LD_B_REG => {
+                    self.emit_address_translation(Some(dst), Value::RegisterPlusConstant64(src, insn.off as i64, true), 1, None);
+                },
+                ebpf::LD_H_REG => {
+                    self.emit_address_translation(Some(dst), Value::RegisterPlusConstant64(src, insn.off as i64, true), 2, None);
+                },
+                ebpf::LD_W_REG => {
+                    self.emit_address_translation(Some(dst), Value::RegisterPlusConstant64(src, insn.off as i64, true), 4, None);
+                },
+                ebpf::LD_DW_REG => {
+                    self.emit_address_translation(Some(dst), Value::RegisterPlusConstant64(src, insn.off as i64, true), 8, None);
+                },
+
+                // BPF_ST class
+                ebpf::ST_B_IMM => {
+                    self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 1, Some(Value::Constant64(insn.imm, true)));
+                },
+                ebpf::ST_H_IMM => {
+                    self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 2, Some(Value::Constant64(insn.imm, true)));
+                },
+                ebpf::ST_W_IMM => {
+                    self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 4, Some(Value::Constant64(insn.imm, true)));
+                },
+                ebpf::ST_DW_IMM => {
+                    self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 8,
Some(Value::Constant64(insn.imm, true))); + }, + + // BPF_STX class + ebpf::ST_B_REG => { + self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 1, Some(Value::Register(src))); + }, + ebpf::ST_H_REG => { + self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 2, Some(Value::Register(src))); + }, + ebpf::ST_W_REG => { + self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 4, Some(Value::Register(src))); + }, + ebpf::ST_DW_REG => { + self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 8, Some(Value::Register(src))); + }, + + // BPF_ALU class + ebpf::ADD32_IMM => { + self.emit_sanitized_alu(OperandSize::S32, 0x01, 0, dst, insn.imm); + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 + }, + ebpf::ADD32_REG => { + self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x01, src, dst, 0, None)); + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 + }, + ebpf::SUB32_IMM => { + if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() { + self.emit_ins(X86Instruction::alu(OperandSize::S32, 0xf7, 3, dst, 0, None)); + if insn.imm != 0 { + self.emit_sanitized_alu(OperandSize::S32, 0x01, 0, dst, insn.imm); + } + } else { + self.emit_sanitized_alu(OperandSize::S32, 0x29, 5, dst, insn.imm); + } + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 + }, + ebpf::SUB32_REG => { + self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x29, src, dst, 0, None)); + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 + }, + ebpf::MUL32_IMM | ebpf::DIV32_IMM | ebpf::MOD32_IMM if !self.executable.get_sbpf_version().enable_pqr() => + self.emit_product_quotient_remainder(OperandSize::S32, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD, (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL, dst, dst, Some(insn.imm)), + ebpf::MUL32_REG | ebpf::DIV32_REG | ebpf::MOD32_REG if !self.executable.get_sbpf_version().enable_pqr() => + self.emit_product_quotient_remainder(OperandSize::S32, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD, (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL, src, dst, None), + ebpf::OR32_IMM => self.emit_sanitized_alu(OperandSize::S32, 0x09, 1, dst, insn.imm), + ebpf::OR32_REG => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x09, src, dst, 0, None)), + ebpf::AND32_IMM => self.emit_sanitized_alu(OperandSize::S32, 0x21, 4, dst, insn.imm), + ebpf::AND32_REG => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x21, src, dst, 0, None)), + ebpf::LSH32_IMM => self.emit_shift(OperandSize::S32, 4, REGISTER_SCRATCH, dst, Some(insn.imm)), + ebpf::LSH32_REG => self.emit_shift(OperandSize::S32, 4, src, dst, None), + ebpf::RSH32_IMM => self.emit_shift(OperandSize::S32, 5, REGISTER_SCRATCH, dst, Some(insn.imm)), + ebpf::RSH32_REG => self.emit_shift(OperandSize::S32, 5, src, dst, None), + ebpf::NEG32 if self.executable.get_sbpf_version().enable_neg() => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0xf7, 3, dst, 0, None)), + ebpf::XOR32_IMM => self.emit_sanitized_alu(OperandSize::S32, 0x31, 6, dst, insn.imm), + ebpf::XOR32_REG => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x31, src, dst, 0, 
None)), + ebpf::MOV32_IMM => { + if self.should_sanitize_constant(insn.imm) { + self.emit_sanitized_load_immediate(OperandSize::S32, dst, insn.imm); + } else { + self.emit_ins(X86Instruction::load_immediate(OperandSize::S32, dst, insn.imm)); + } + } + ebpf::MOV32_REG => self.emit_ins(X86Instruction::mov(OperandSize::S32, src, dst)), + ebpf::ARSH32_IMM => self.emit_shift(OperandSize::S32, 7, REGISTER_SCRATCH, dst, Some(insn.imm)), + ebpf::ARSH32_REG => self.emit_shift(OperandSize::S32, 7, src, dst, None), + ebpf::LE if self.executable.get_sbpf_version().enable_le() => { + match insn.imm { + 16 => { + self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x81, 4, dst, 0xffff, None)); // Mask to 16 bit + } + 32 => { + self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x81, 4, dst, -1, None)); // Mask to 32 bit + } + 64 => {} + _ => { + return Err(EbpfError::InvalidInstruction); + } + } + }, + ebpf::BE => { + match insn.imm { + 16 => { + self.emit_ins(X86Instruction::bswap(OperandSize::S16, dst)); + self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x81, 4, dst, 0xffff, None)); // Mask to 16 bit + } + 32 => self.emit_ins(X86Instruction::bswap(OperandSize::S32, dst)), + 64 => self.emit_ins(X86Instruction::bswap(OperandSize::S64, dst)), + _ => { + return Err(EbpfError::InvalidInstruction); + } + } + }, + + // BPF_ALU64 class + ebpf::ADD64_IMM => self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, dst, insn.imm), + ebpf::ADD64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, src, dst, 0, None)), + ebpf::SUB64_IMM => { + if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() { + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xf7, 3, dst, 0, None)); + if insn.imm != 0 { + self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, dst, insn.imm); + } + } else { + self.emit_sanitized_alu(OperandSize::S64, 0x29, 5, dst, insn.imm); + } + } + ebpf::SUB64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, src, dst, 0, None)), + ebpf::MUL64_IMM | ebpf::DIV64_IMM | ebpf::MOD64_IMM if !self.executable.get_sbpf_version().enable_pqr() => + self.emit_product_quotient_remainder(OperandSize::S64, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD, (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL, dst, dst, Some(insn.imm)), + ebpf::MUL64_REG | ebpf::DIV64_REG | ebpf::MOD64_REG if !self.executable.get_sbpf_version().enable_pqr() => + self.emit_product_quotient_remainder(OperandSize::S64, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD, (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL, src, dst, None), + ebpf::OR64_IMM => self.emit_sanitized_alu(OperandSize::S64, 0x09, 1, dst, insn.imm), + ebpf::OR64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x09, src, dst, 0, None)), + ebpf::AND64_IMM => self.emit_sanitized_alu(OperandSize::S64, 0x21, 4, dst, insn.imm), + ebpf::AND64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x21, src, dst, 0, None)), + ebpf::LSH64_IMM => self.emit_shift(OperandSize::S64, 4, REGISTER_SCRATCH, dst, Some(insn.imm)), + ebpf::LSH64_REG => self.emit_shift(OperandSize::S64, 4, src, dst, None), + ebpf::RSH64_IMM => self.emit_shift(OperandSize::S64, 5, REGISTER_SCRATCH, dst, Some(insn.imm)), + ebpf::RSH64_REG => self.emit_shift(OperandSize::S64, 5, src, dst, None), + ebpf::NEG64 if self.executable.get_sbpf_version().enable_neg() => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xf7, 3, dst, 0, None)), + 
ebpf::XOR64_IMM => self.emit_sanitized_alu(OperandSize::S64, 0x31, 6, dst, insn.imm), + ebpf::XOR64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x31, src, dst, 0, None)), + ebpf::MOV64_IMM => { + if self.should_sanitize_constant(insn.imm) { + self.emit_sanitized_load_immediate(OperandSize::S64, dst, insn.imm); + } else { + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, dst, insn.imm)); + } + } + ebpf::MOV64_REG => self.emit_ins(X86Instruction::mov(OperandSize::S64, src, dst)), + ebpf::ARSH64_IMM => self.emit_shift(OperandSize::S64, 7, REGISTER_SCRATCH, dst, Some(insn.imm)), + ebpf::ARSH64_REG => self.emit_shift(OperandSize::S64, 7, src, dst, None), + ebpf::HOR64_IMM => { + self.emit_sanitized_alu(OperandSize::S64, 0x09, 1, dst, (insn.imm as u64).wrapping_shl(32) as i64); + } + + // BPF_PQR class + ebpf::LMUL32_IMM | ebpf::LMUL64_IMM | ebpf::UHMUL64_IMM | ebpf::SHMUL64_IMM | + ebpf::UDIV32_IMM | ebpf::UDIV64_IMM | ebpf::UREM32_IMM | ebpf::UREM64_IMM | + ebpf::SDIV32_IMM | ebpf::SDIV64_IMM | ebpf::SREM32_IMM | ebpf::SREM64_IMM + if self.executable.get_sbpf_version().enable_pqr() => { + self.emit_product_quotient_remainder( + if insn.opc & (1 << 4) != 0 { OperandSize::S64 } else { OperandSize::S32 }, + insn.opc & (1 << 5) != 0, + insn.opc & (1 << 6) != 0, + insn.opc & (1 << 7) != 0, + dst, dst, Some(insn.imm), + ) + } + ebpf::LMUL32_REG | ebpf::LMUL64_REG | ebpf::UHMUL64_REG | ebpf::SHMUL64_REG | + ebpf::UDIV32_REG | ebpf::UDIV64_REG | ebpf::UREM32_REG | ebpf::UREM64_REG | + ebpf::SDIV32_REG | ebpf::SDIV64_REG | ebpf::SREM32_REG | ebpf::SREM64_REG + if self.executable.get_sbpf_version().enable_pqr() => { + self.emit_product_quotient_remainder( + if insn.opc & (1 << 4) != 0 { OperandSize::S64 } else { OperandSize::S32 }, + insn.opc & (1 << 5) != 0, + insn.opc & (1 << 6) != 0, + insn.opc & (1 << 7) != 0, + src, dst, None, + ) + } + + // BPF_JMP class + ebpf::JA => { + self.emit_validate_and_profile_instruction_count(false, Some(target_pc)); + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, target_pc as i64)); + let jump_offset = self.relative_to_target_pc(target_pc, 5); + self.emit_ins(X86Instruction::jump_immediate(jump_offset)); + }, + ebpf::JEQ_IMM => self.emit_conditional_branch_imm(0x84, false, insn.imm, dst, target_pc), + ebpf::JEQ_REG => self.emit_conditional_branch_reg(0x84, false, src, dst, target_pc), + ebpf::JGT_IMM => self.emit_conditional_branch_imm(0x87, false, insn.imm, dst, target_pc), + ebpf::JGT_REG => self.emit_conditional_branch_reg(0x87, false, src, dst, target_pc), + ebpf::JGE_IMM => self.emit_conditional_branch_imm(0x83, false, insn.imm, dst, target_pc), + ebpf::JGE_REG => self.emit_conditional_branch_reg(0x83, false, src, dst, target_pc), + ebpf::JLT_IMM => self.emit_conditional_branch_imm(0x82, false, insn.imm, dst, target_pc), + ebpf::JLT_REG => self.emit_conditional_branch_reg(0x82, false, src, dst, target_pc), + ebpf::JLE_IMM => self.emit_conditional_branch_imm(0x86, false, insn.imm, dst, target_pc), + ebpf::JLE_REG => self.emit_conditional_branch_reg(0x86, false, src, dst, target_pc), + ebpf::JSET_IMM => self.emit_conditional_branch_imm(0x85, true, insn.imm, dst, target_pc), + ebpf::JSET_REG => self.emit_conditional_branch_reg(0x85, true, src, dst, target_pc), + ebpf::JNE_IMM => self.emit_conditional_branch_imm(0x85, false, insn.imm, dst, target_pc), + ebpf::JNE_REG => self.emit_conditional_branch_reg(0x85, false, src, dst, target_pc), + ebpf::JSGT_IMM => self.emit_conditional_branch_imm(0x8f, false, 
insn.imm, dst, target_pc), + ebpf::JSGT_REG => self.emit_conditional_branch_reg(0x8f, false, src, dst, target_pc), + ebpf::JSGE_IMM => self.emit_conditional_branch_imm(0x8d, false, insn.imm, dst, target_pc), + ebpf::JSGE_REG => self.emit_conditional_branch_reg(0x8d, false, src, dst, target_pc), + ebpf::JSLT_IMM => self.emit_conditional_branch_imm(0x8c, false, insn.imm, dst, target_pc), + ebpf::JSLT_REG => self.emit_conditional_branch_reg(0x8c, false, src, dst, target_pc), + ebpf::JSLE_IMM => self.emit_conditional_branch_imm(0x8e, false, insn.imm, dst, target_pc), + ebpf::JSLE_REG => self.emit_conditional_branch_reg(0x8e, false, src, dst, target_pc), + ebpf::CALL_IMM => { + // For JIT, external functions MUST be registered at compile time. + + let mut resolved = false; + let (external, internal) = if self.executable.get_sbpf_version().static_syscalls() { + (insn.src == 0, insn.src != 0) + } else { + (true, true) + }; + + if external { + if let Some((_function_name, function)) = self.executable.get_loader().get_function_registry().lookup_by_key(insn.imm as u32) { + self.emit_validate_and_profile_instruction_count(true, Some(0)); + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, function as usize as i64)); + self.emit_ins(X86Instruction::call_immediate(self.relative_to_anchor(ANCHOR_EXTERNAL_FUNCTION_CALL, 5))); + self.emit_undo_profile_instruction_count(0); + resolved = true; + } + } + + if internal { + if let Some((_function_name, target_pc)) = self.executable.get_function_registry().lookup_by_key(insn.imm as u32) { + self.emit_internal_call(Value::Constant64(target_pc as i64, false)); + resolved = true; + } + } + + if !resolved { + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, self.pc as i64)); + self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_CALL_UNSUPPORTED_INSTRUCTION, 5))); + } + }, + ebpf::CALL_REG => { + let target_pc = if self.executable.get_sbpf_version().callx_uses_src_reg() { + src + } else { + REGISTER_MAP[insn.imm as usize] + }; + self.emit_internal_call(Value::Register(target_pc)); + }, + ebpf::EXIT => { + let call_depth_access = X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::CallDepth)); + self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_MAP[FRAME_PTR_REG], call_depth_access)); + + // If CallDepth == 0, we've reached the exit instruction of the entry point + self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S32, REGISTER_MAP[FRAME_PTR_REG], 0, None)); + if self.config.enable_instruction_meter { + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, self.pc as i64)); + } + // we're done + self.emit_ins(X86Instruction::conditional_jump_immediate(0x84, self.relative_to_anchor(ANCHOR_EXIT, 6))); + + // else decrement and update CallDepth + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, REGISTER_MAP[FRAME_PTR_REG], 1, None)); + self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_MAP[FRAME_PTR_REG], REGISTER_PTR_TO_VM, call_depth_access)); + + if !self.executable.get_sbpf_version().dynamic_stack_frames() { + let stack_pointer_access = X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::StackPointer)); + let stack_frame_size = self.config.stack_frame_size as i64 * if self.config.enable_stack_frame_gaps { 2 } else { 1 }; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, REGISTER_PTR_TO_VM, stack_frame_size, Some(stack_pointer_access))); // 
env.stack_pointer -= stack_frame_size;
+                    }
+
+                    // and return
+                    self.emit_validate_and_profile_instruction_count(false, Some(0));
+                    self.emit_ins(X86Instruction::return_near());
+                },
+
+                _ => return Err(EbpfError::UnsupportedInstruction),
+            }
+
+            self.pc += 1;
+        }
+
+        // Bumper in case there was no final exit
+        if self.offset_in_text_section + MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION > self.result.text_section.len() {
+            return Err(EbpfError::ExhaustedTextSegment(self.pc));
+        }
+        self.emit_validate_and_profile_instruction_count(true, Some(self.pc + 2));
+        self.emit_set_exception_kind(EbpfError::ExecutionOverrun);
+        self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_THROW_EXCEPTION, 5)));
+
+        self.resolve_jumps();
+        self.result.seal(self.offset_in_text_section)?;
+        Ok(self.result)
+    }
+
+    #[inline]
+    fn should_sanitize_constant(&self, value: i64) -> bool {
+        if !self.config.sanitize_user_provided_values {
+            return false;
+        }
+
+        match value as u64 {
+            0xFFFF
+            | 0xFFFFFF
+            | 0xFFFFFFFF
+            | 0xFFFFFFFFFF
+            | 0xFFFFFFFFFFFF
+            | 0xFFFFFFFFFFFFFF
+            | 0xFFFFFFFFFFFFFFFF => false,
+            v if v <= 0xFF => false,
+            v if !v <= 0xFF => false,
+            _ => true
+        }
+    }
+
+    #[inline]
+    fn slot_in_vm(&self, slot: RuntimeEnvironmentSlot) -> i32 {
+        8 * (slot as i32 - self.runtime_environment_key)
+    }
+
+    #[inline]
+    pub(crate) fn emit<T>(&mut self, data: T) {
+        unsafe {
+            let ptr = self.result.text_section.as_ptr().add(self.offset_in_text_section);
+            #[allow(clippy::cast_ptr_alignment)]
+            ptr::write_unaligned(ptr as *mut T, data);
+        }
+        self.offset_in_text_section += mem::size_of::<T>();
+    }
+
+    #[inline]
+    pub(crate) fn emit_variable_length(&mut self, size: OperandSize, data: u64) {
+        match size {
+            OperandSize::S0 => {},
+            OperandSize::S8 => self.emit::<u8>(data as u8),
+            OperandSize::S16 => self.emit::<u16>(data as u16),
+            OperandSize::S32 => self.emit::<u32>(data as u32),
+            OperandSize::S64 => self.emit::<u64>(data),
+        }
+    }
+
+    // This function helps the optimizer to inline the machinecode emission while avoiding stack allocations
+    #[inline(always)]
+    pub fn emit_ins(&mut self, instruction: X86Instruction) {
+        instruction.emit(self);
+        if self.next_noop_insertion == 0 {
+            self.next_noop_insertion = self.diversification_rng.gen_range(0..self.config.noop_instruction_rate * 2);
+            // X86Instruction::noop().emit(self)?;
+            self.emit::<u8>(0x90);
+        } else {
+            self.next_noop_insertion -= 1;
+        }
+    }
+
+    #[inline]
+    fn emit_sanitized_load_immediate(&mut self, size: OperandSize, destination: u8, value: i64) {
+        match size {
+            OperandSize::S32 => {
+                let key = self.diversification_rng.gen::<i32>() as i64;
+                self.emit_ins(X86Instruction::load_immediate(size, destination, (value as i32).wrapping_sub(key as i32) as i64));
+                self.emit_ins(X86Instruction::alu(size, 0x81, 0, destination, key, None));
+            },
+            OperandSize::S64 if value >= i32::MIN as i64 && value <= i32::MAX as i64 => {
+                let key = self.diversification_rng.gen::<i32>() as i64;
+                self.emit_ins(X86Instruction::load_immediate(size, destination, value.wrapping_sub(key)));
+                self.emit_ins(X86Instruction::alu(size, 0x81, 0, destination, key, None));
+            },
+            OperandSize::S64 if value as u64 & u32::MAX as u64 == 0 => {
+                let key = self.diversification_rng.gen::<i32>() as i64;
+                self.emit_ins(X86Instruction::load_immediate(size, destination, value.rotate_right(32).wrapping_sub(key)));
+                self.emit_ins(X86Instruction::alu(size, 0x81, 0, destination, key, None)); // wrapping_add(key)
+                self.emit_ins(X86Instruction::alu(size, 0xc1, 4, destination, 32, None)); // shift_left(32)
+            },
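+            // Constant sanitization in a nutshell (comment added for clarity): instead of
+            // emitting a user-controlled immediate verbatim into executable memory, the arms
+            // of this match emit `load_immediate(dst, value - key)` followed by an add of a
+            // fresh random key from diversification_rng, so attacker-chosen byte patterns
+            // never appear literally in the text section.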
+            OperandSize::S64 => {
+                let key = self.diversification_rng.gen::<i64>();
+                if destination != REGISTER_SCRATCH {
+                    self.emit_ins(X86Instruction::load_immediate(size, destination, value.wrapping_sub(key)));
+                    self.emit_ins(X86Instruction::load_immediate(size, REGISTER_SCRATCH, key));
+                    self.emit_ins(X86Instruction::alu(size, 0x01, REGISTER_SCRATCH, destination, 0, None));
+                } else {
+                    let lower_key = key as i32 as i64;
+                    let upper_key = (key >> 32) as i32 as i64;
+                    self.emit_ins(X86Instruction::load_immediate(size, destination, value.wrapping_sub(lower_key).rotate_right(32).wrapping_sub(upper_key)));
+                    self.emit_ins(X86Instruction::alu(size, 0x81, 0, destination, upper_key, None)); // wrapping_add(upper_key)
+                    self.emit_ins(X86Instruction::alu(size, 0xc1, 1, destination, 32, None)); // rotate_right(32)
+                    self.emit_ins(X86Instruction::alu(size, 0x81, 0, destination, lower_key, None)); // wrapping_add(lower_key)
+                }
+            },
+            _ => {
+                #[cfg(debug_assertions)]
+                unreachable!();
+            }
+        }
+    }
+
+    #[inline]
+    fn emit_sanitized_alu(&mut self, size: OperandSize, opcode: u8, opcode_extension: u8, destination: u8, immediate: i64) {
+        if self.should_sanitize_constant(immediate) {
+            self.emit_sanitized_load_immediate(size, REGISTER_SCRATCH, immediate);
+            self.emit_ins(X86Instruction::alu(size, opcode, REGISTER_SCRATCH, destination, 0, None));
+        } else if immediate >= i32::MIN as i64 && immediate <= i32::MAX as i64 {
+            self.emit_ins(X86Instruction::alu(size, 0x81, opcode_extension, destination, immediate, None));
+        } else {
+            self.emit_ins(X86Instruction::load_immediate(size, REGISTER_SCRATCH, immediate));
+            self.emit_ins(X86Instruction::alu(size, opcode, REGISTER_SCRATCH, destination, 0, None));
+        }
+    }
+
+    #[allow(dead_code)]
+    #[inline]
+    fn emit_stopwatch(&mut self, begin: bool) {
+        self.stopwatch_is_active = true;
+        self.emit_ins(X86Instruction::push(RDX, None));
+        self.emit_ins(X86Instruction::push(RAX, None));
+        self.emit_ins(X86Instruction::fence(FenceType::Load)); // lfence
+        self.emit_ins(X86Instruction::cycle_count()); // rdtsc
+        self.emit_ins(X86Instruction::fence(FenceType::Load)); // lfence
+        self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xc1, 4, RDX, 32, None)); // RDX <<= 32;
+        self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x09, RDX, RAX, 0, None)); // RAX |= RDX;
+        if begin {
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, RAX, REGISTER_PTR_TO_VM, 0, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::StopwatchNumerator))))); // *numerator -= RAX;
+        } else {
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, RAX, REGISTER_PTR_TO_VM, 0, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::StopwatchNumerator))))); // *numerator += RAX;
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_PTR_TO_VM, 1, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::StopwatchDenominator))))); // *denominator += 1;
+        }
+        self.emit_ins(X86Instruction::pop(RAX));
+        self.emit_ins(X86Instruction::pop(RDX));
+    }
+
+    #[inline]
+    fn emit_validate_instruction_count(&mut self, exclusive: bool, pc: Option<usize>) {
+        if !self.config.enable_instruction_meter {
+            return;
+        }
+        // Update `MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT` if you change the code generation here
+        if let Some(pc) = pc {
+            self.last_instruction_meter_validation_pc = pc;
+            self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S64, REGISTER_INSTRUCTION_METER, pc as i64 + 1, None));
+        } else {
+            self.emit_ins(X86Instruction::cmp(OperandSize::S64, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, None));
+        }
+        self.emit_ins(X86Instruction::conditional_jump_immediate(if exclusive { 0x82 } else { 0x86 }, self.relative_to_anchor(ANCHOR_THROW_EXCEEDED_MAX_INSTRUCTIONS, 6)));
+    }
+
+    #[inline]
+    fn emit_profile_instruction_count(&mut self, target_pc: Option<usize>) {
+        match target_pc {
+            Some(target_pc) => {
+                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_INSTRUCTION_METER, target_pc as i64 - self.pc as i64 - 1, None)); // instruction_meter += target_pc - (self.pc + 1);
+            },
+            None => {
+                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, self.pc as i64 + 1, None)); // instruction_meter -= self.pc + 1;
+                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, self.pc as i64, None)); // instruction_meter += target_pc;
+            },
+        }
+    }
+
+    #[inline]
+    fn emit_validate_and_profile_instruction_count(&mut self, exclusive: bool, target_pc: Option<usize>) {
+        if self.config.enable_instruction_meter {
+            self.emit_validate_instruction_count(exclusive, Some(self.pc));
+            self.emit_profile_instruction_count(target_pc);
+        }
+    }
+
+    #[inline]
+    fn emit_undo_profile_instruction_count(&mut self, target_pc: usize) {
+        if self.config.enable_instruction_meter {
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_INSTRUCTION_METER, self.pc as i64 + 1 - target_pc as i64, None)); // instruction_meter += (self.pc + 1) - target_pc;
+        }
+    }
+
+    fn emit_rust_call(&mut self, target: Value, arguments: &[Argument], result_reg: Option<u8>) {
+        let mut saved_registers = CALLER_SAVED_REGISTERS.to_vec();
+        if let Some(reg) = result_reg {
+            if let Some(dst) = saved_registers.iter().position(|x| *x == reg) {
+                saved_registers.remove(dst);
+            }
+        }
+
+        // Save registers on stack
+        for reg in saved_registers.iter() {
+            self.emit_ins(X86Instruction::push(*reg, None));
+        }
+
+        // Align RSP to 16 bytes
+        self.emit_ins(X86Instruction::push(RSP, None));
+        self.emit_ins(X86Instruction::push(RSP, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0))));
+        self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 4, RSP, -16, None));
+
+        let stack_arguments = arguments.len().saturating_sub(ARGUMENT_REGISTERS.len()) as i64;
+        if stack_arguments % 2 != 0 {
+            // If we're going to pass an odd number of stack args we need to pad
+            // to preserve alignment
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, RSP, 8, None));
+        }
+
+        // Pass arguments
+        for argument in arguments {
+            let is_stack_argument = argument.index >= ARGUMENT_REGISTERS.len();
+            let dst = if is_stack_argument {
+                u8::MAX // Never used
+            } else {
+                ARGUMENT_REGISTERS[argument.index]
+            };
+            match argument.value {
+                Value::Register(reg) => {
+                    if is_stack_argument {
+                        self.emit_ins(X86Instruction::push(reg, None));
+                    } else if reg != dst {
+                        self.emit_ins(X86Instruction::mov(OperandSize::S64, reg, dst));
+                    }
+                },
+                Value::RegisterIndirect(reg, offset, user_provided) => {
+                    debug_assert!(!user_provided);
+                    if is_stack_argument {
+                        self.emit_ins(X86Instruction::push(reg, Some(X86IndirectAccess::Offset(offset))));
+                    } else {
+                        self.emit_ins(X86Instruction::load(OperandSize::S64, reg, dst, X86IndirectAccess::Offset(offset)));
+                    }
+                },
+                Value::RegisterPlusConstant32(reg, offset, user_provided) => {
+                    debug_assert!(!user_provided);
+                    if is_stack_argument {
+                        self.emit_ins(X86Instruction::push(reg, None));
+                        self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, offset as i64,
Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); + } else { + self.emit_ins(X86Instruction::lea(OperandSize::S64, reg, dst, Some(X86IndirectAccess::Offset(offset)))); + } + }, + Value::RegisterPlusConstant64(reg, offset, user_provided) => { + debug_assert!(!user_provided); + if is_stack_argument { + self.emit_ins(X86Instruction::push(reg, None)); + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, offset, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); + } else { + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, dst, offset)); + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, reg, dst, 0, None)); + } + }, + Value::Constant64(value, user_provided) => { + debug_assert!(!user_provided && !is_stack_argument); + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, dst, value)); + }, + } + } + + match target { + Value::Register(reg) => { + self.emit_ins(X86Instruction::call_reg(reg, None)); + }, + Value::Constant64(value, user_provided) => { + debug_assert!(!user_provided); + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, RAX, value)); + self.emit_ins(X86Instruction::call_reg(RAX, None)); + }, + _ => { + #[cfg(debug_assertions)] + unreachable!(); + } + } + + // Save returned value in result register + if let Some(reg) = result_reg { + self.emit_ins(X86Instruction::mov(OperandSize::S64, RAX, reg)); + } + + // Restore registers from stack + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, + if stack_arguments % 2 != 0 { stack_arguments + 1 } else { stack_arguments } * 8, None)); + self.emit_ins(X86Instruction::load(OperandSize::S64, RSP, RSP, X86IndirectAccess::OffsetIndexShift(8, RSP, 0))); + + for reg in saved_registers.iter().rev() { + self.emit_ins(X86Instruction::pop(*reg)); + } + } + + #[inline] + fn emit_internal_call(&mut self, dst: Value) { + // Store PC in case the bounds check fails + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, self.pc as i64)); + + self.emit_ins(X86Instruction::call_immediate(self.relative_to_anchor(ANCHOR_ANCHOR_INTERNAL_FUNCTION_CALL_PROLOGUE, 5))); + + match dst { + Value::Register(reg) => { + // Move vm target_address into RAX + self.emit_ins(X86Instruction::push(REGISTER_MAP[0], None)); + if reg != REGISTER_MAP[0] { + self.emit_ins(X86Instruction::mov(OperandSize::S64, reg, REGISTER_MAP[0])); + } + + self.emit_ins(X86Instruction::call_immediate(self.relative_to_anchor(ANCHOR_ANCHOR_INTERNAL_FUNCTION_CALL_REG, 5))); + + self.emit_validate_and_profile_instruction_count(false, None); + self.emit_ins(X86Instruction::mov(OperandSize::S64, REGISTER_MAP[0], REGISTER_OTHER_SCRATCH)); + self.emit_ins(X86Instruction::pop(REGISTER_MAP[0])); // Restore RAX + self.emit_ins(X86Instruction::call_reg(REGISTER_OTHER_SCRATCH, None)); // callq *REGISTER_OTHER_SCRATCH + }, + Value::Constant64(target_pc, user_provided) => { + debug_assert!(!user_provided); + self.emit_validate_and_profile_instruction_count(false, Some(target_pc as usize)); + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, target_pc)); + let jump_offset = self.relative_to_target_pc(target_pc as usize, 5); + self.emit_ins(X86Instruction::call_immediate(jump_offset)); + }, + _ => { + #[cfg(debug_assertions)] + unreachable!(); + } + } + + self.emit_undo_profile_instruction_count(0); + + // Restore the previous frame pointer + self.emit_ins(X86Instruction::pop(REGISTER_MAP[FRAME_PTR_REG])); + for reg in 
REGISTER_MAP.iter().skip(FIRST_SCRATCH_REG).take(SCRATCH_REGS).rev() {
+            self.emit_ins(X86Instruction::pop(*reg));
+        }
+    }
+
+    #[inline]
+    fn emit_address_translation(&mut self, dst: Option<u8>, vm_addr: Value, len: u64, value: Option<Value>) {
+        debug_assert_ne!(dst.is_some(), value.is_some());
+
+        match vm_addr {
+            Value::RegisterPlusConstant64(reg, constant, user_provided) => {
+                if user_provided && self.should_sanitize_constant(constant) {
+                    self.emit_sanitized_load_immediate(OperandSize::S64, REGISTER_SCRATCH, constant);
+                } else {
+                    self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, constant));
+                }
+                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, reg, REGISTER_SCRATCH, 0, None));
+            },
+            Value::Constant64(constant, user_provided) => {
+                if user_provided && self.should_sanitize_constant(constant) {
+                    self.emit_sanitized_load_immediate(OperandSize::S64, REGISTER_SCRATCH, constant);
+                } else {
+                    self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, constant));
+                }
+            },
+            _ => {
+                #[cfg(debug_assertions)]
+                unreachable!();
+            },
+        }
+
+        match value {
+            Some(Value::Register(reg)) => {
+                self.emit_ins(X86Instruction::mov(OperandSize::S64, reg, REGISTER_OTHER_SCRATCH));
+            }
+            Some(Value::Constant64(constant, user_provided)) => {
+                if user_provided && self.should_sanitize_constant(constant) {
+                    self.emit_sanitized_load_immediate(OperandSize::S64, REGISTER_OTHER_SCRATCH, constant);
+                } else {
+                    self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_OTHER_SCRATCH, constant));
+                }
+            }
+            _ => {}
+        }
+
+        if self.config.enable_address_translation {
+            let access_type = if value.is_none() { AccessType::Load } else { AccessType::Store };
+            let anchor = ANCHOR_TRANSLATE_MEMORY_ADDRESS + len.trailing_zeros() as usize + 4 * (access_type as usize);
+            self.emit_ins(X86Instruction::push_immediate(OperandSize::S64, self.pc as i32));
+            self.emit_ins(X86Instruction::call_immediate(self.relative_to_anchor(anchor, 5)));
+            if let Some(dst) = dst {
+                self.emit_ins(X86Instruction::mov(OperandSize::S64, REGISTER_SCRATCH, dst));
+            }
+        } else if let Some(dst) = dst {
+            match len {
+                1 => self.emit_ins(X86Instruction::load(OperandSize::S8, REGISTER_SCRATCH, dst, X86IndirectAccess::Offset(0))),
+                2 => self.emit_ins(X86Instruction::load(OperandSize::S16, REGISTER_SCRATCH, dst, X86IndirectAccess::Offset(0))),
+                4 => self.emit_ins(X86Instruction::load(OperandSize::S32, REGISTER_SCRATCH, dst, X86IndirectAccess::Offset(0))),
+                8 => self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_SCRATCH, dst, X86IndirectAccess::Offset(0))),
+                _ => unreachable!(),
+            }
+        } else {
+            match len {
+                1 => self.emit_ins(X86Instruction::store(OperandSize::S8, REGISTER_OTHER_SCRATCH, REGISTER_SCRATCH, X86IndirectAccess::Offset(0))),
+                2 => self.emit_ins(X86Instruction::store(OperandSize::S16, REGISTER_OTHER_SCRATCH, REGISTER_SCRATCH, X86IndirectAccess::Offset(0))),
+                4 => self.emit_ins(X86Instruction::store(OperandSize::S32, REGISTER_OTHER_SCRATCH, REGISTER_SCRATCH, X86IndirectAccess::Offset(0))),
+                8 => self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_OTHER_SCRATCH, REGISTER_SCRATCH, X86IndirectAccess::Offset(0))),
+                _ => unreachable!(),
+            }
+        }
+    }
+
+    #[inline]
+    fn emit_conditional_branch_reg(&mut self, op: u8, bitwise: bool, first_operand: u8, second_operand: u8, target_pc: usize) {
+        self.emit_validate_and_profile_instruction_count(false, Some(target_pc));
+        if bitwise { // Logical
+            self.emit_ins(X86Instruction::test(OperandSize::S64, first_operand, second_operand, None));
+        } else { // Arithmetic
+            self.emit_ins(X86Instruction::cmp(OperandSize::S64, first_operand, second_operand, None));
+        }
+        self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, target_pc as i64));
+        let jump_offset = self.relative_to_target_pc(target_pc, 6);
+        self.emit_ins(X86Instruction::conditional_jump_immediate(op, jump_offset));
+        self.emit_undo_profile_instruction_count(target_pc);
+    }
+
+    #[inline]
+    fn emit_conditional_branch_imm(&mut self, op: u8, bitwise: bool, immediate: i64, second_operand: u8, target_pc: usize) {
+        self.emit_validate_and_profile_instruction_count(false, Some(target_pc));
+        if self.should_sanitize_constant(immediate) {
+            self.emit_sanitized_load_immediate(OperandSize::S64, REGISTER_SCRATCH, immediate);
+            if bitwise { // Logical
+                self.emit_ins(X86Instruction::test(OperandSize::S64, REGISTER_SCRATCH, second_operand, None));
+            } else { // Arithmetic
+                self.emit_ins(X86Instruction::cmp(OperandSize::S64, REGISTER_SCRATCH, second_operand, None));
+            }
+        } else if bitwise { // Logical
+            self.emit_ins(X86Instruction::test_immediate(OperandSize::S64, second_operand, immediate, None));
+        } else { // Arithmetic
+            self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S64, second_operand, immediate, None));
+        }
+        self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, target_pc as i64));
+        let jump_offset = self.relative_to_target_pc(target_pc, 6);
+        self.emit_ins(X86Instruction::conditional_jump_immediate(op, jump_offset));
+        self.emit_undo_profile_instruction_count(target_pc);
+    }
+
+    fn emit_shift(&mut self, size: OperandSize, opcode_extension: u8, source: u8, destination: u8, immediate: Option<i64>) {
+        if let Some(immediate) = immediate {
+            if self.should_sanitize_constant(immediate) {
+                self.emit_sanitized_load_immediate(OperandSize::S32, source, immediate);
+            } else {
+                self.emit_ins(X86Instruction::alu(size, 0xc1, opcode_extension, destination, immediate, None));
+                return;
+            }
+        }
+        if let OperandSize::S32 = size {
+            self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x81, 4, destination, -1, None)); // Mask to 32 bit
+        }
+        if source == RCX {
+            if destination == RCX {
+                self.emit_ins(X86Instruction::alu(size, 0xd3, opcode_extension, destination, 0, None));
+            } else {
+                self.emit_ins(X86Instruction::push(RCX, None));
+                self.emit_ins(X86Instruction::alu(size, 0xd3, opcode_extension, destination, 0, None));
+                self.emit_ins(X86Instruction::pop(RCX));
+            }
+        } else if destination == RCX {
+            if source != REGISTER_SCRATCH {
+                self.emit_ins(X86Instruction::push(source, None));
+            }
+            self.emit_ins(X86Instruction::xchg(OperandSize::S64, source, RCX, None));
+            self.emit_ins(X86Instruction::alu(size, 0xd3, opcode_extension, source, 0, None));
+            self.emit_ins(X86Instruction::mov(OperandSize::S64, source, RCX));
+            if source != REGISTER_SCRATCH {
+                self.emit_ins(X86Instruction::pop(source));
+            }
+        } else {
+            self.emit_ins(X86Instruction::push(RCX, None));
+            self.emit_ins(X86Instruction::mov(OperandSize::S64, source, RCX));
+            self.emit_ins(X86Instruction::alu(size, 0xd3, opcode_extension, destination, 0, None));
+            self.emit_ins(X86Instruction::pop(RCX));
+        }
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn emit_product_quotient_remainder(&mut self, size: OperandSize, alt_dst: bool, division: bool, signed: bool, src: u8, dst: u8, imm: Option<i64>) {
+        //          LMUL UHMUL SHMUL UDIV SDIV UREM SREM
+        // ALU      F7/4 F7/4  F7/5  F7/6 F7/7 F7/6 F7/7
+        // src-in REGISTER_SCRATCH REGISTER_SCRATCH REGISTER_SCRATCH
REGISTER_SCRATCH REGISTER_SCRATCH REGISTER_SCRATCH REGISTER_SCRATCH + // dst-in RAX RAX RAX RAX RAX RAX RAX + // dst-out RAX RDX RDX RAX RAX RDX RDX + + if division { + // Prevent division by zero + if imm.is_none() { + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, self.pc as i64)); // Save pc + self.emit_ins(X86Instruction::test(size, src, src, None)); // src == 0 + self.emit_ins(X86Instruction::conditional_jump_immediate(0x84, self.relative_to_anchor(ANCHOR_DIV_BY_ZERO, 6))); + } + + // Signed division overflows with MIN / -1. + // If we have an immediate and it's not -1, we can skip the following check. + if signed && imm.unwrap_or(-1) == -1 { + self.emit_ins(X86Instruction::load_immediate(size, REGISTER_SCRATCH, if let OperandSize::S64 = size { i64::MIN } else { i32::MIN as i64 })); + self.emit_ins(X86Instruction::cmp(size, dst, REGISTER_SCRATCH, None)); // dst == MIN + + if imm.is_none() { + // The exception case is: dst == MIN && src == -1 + // Via De Morgan's law becomes: !(dst != MIN || src != -1) + // Also, we know that src != 0 in here, so we can use it to set REGISTER_SCRATCH to something not zero + self.emit_ins(X86Instruction::load_immediate(size, REGISTER_SCRATCH, 0)); // No XOR here because we need to keep the status flags + self.emit_ins(X86Instruction::cmov(size, 0x45, src, REGISTER_SCRATCH)); // if dst != MIN { REGISTER_SCRATCH = src; } + self.emit_ins(X86Instruction::cmp_immediate(size, src, -1, None)); // src == -1 + self.emit_ins(X86Instruction::cmov(size, 0x45, src, REGISTER_SCRATCH)); // if src != -1 { REGISTER_SCRATCH = src; } + self.emit_ins(X86Instruction::test(size, REGISTER_SCRATCH, REGISTER_SCRATCH, None)); // REGISTER_SCRATCH == 0 + } + + // MIN / -1, raise EbpfError::DivideOverflow + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, self.pc as i64)); + self.emit_ins(X86Instruction::conditional_jump_immediate(0x84, self.relative_to_anchor(ANCHOR_DIV_OVERFLOW, 6))); + } + } + + if let Some(imm) = imm { + if self.should_sanitize_constant(imm) { + self.emit_sanitized_load_immediate(OperandSize::S64, REGISTER_SCRATCH, imm); + } else { + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_SCRATCH, imm)); + } + } else { + self.emit_ins(X86Instruction::mov(OperandSize::S64, src, REGISTER_SCRATCH)); + } + if dst != RAX { + self.emit_ins(X86Instruction::push(RAX, None)); + self.emit_ins(X86Instruction::mov(OperandSize::S64, dst, RAX)); + } + if dst != RDX { + self.emit_ins(X86Instruction::push(RDX, None)); + } + if division { + if signed { + self.emit_ins(X86Instruction::sign_extend_rax_rdx(size)); + } else { + self.emit_ins(X86Instruction::alu(size, 0x31, RDX, RDX, 0, None)); // RDX = 0 + } + } + + self.emit_ins(X86Instruction::alu(size, 0xf7, 0x4 | (division as u8) << 1 | signed as u8, REGISTER_SCRATCH, 0, None)); + + if dst != RDX { + if alt_dst { + self.emit_ins(X86Instruction::mov(OperandSize::S64, RDX, dst)); + } + self.emit_ins(X86Instruction::pop(RDX)); + } + if dst != RAX { + if !alt_dst { + self.emit_ins(X86Instruction::mov(OperandSize::S64, RAX, dst)); + } + self.emit_ins(X86Instruction::pop(RAX)); + } + if let OperandSize::S32 = size { + if signed { + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 + } + } + } + + fn emit_set_exception_kind(&mut self, err: EbpfError) { + let err_kind = unsafe { *(&err as *const _ as *const u64) }; + let err_discriminant = ProgramResult::Err(err).discriminant(); + 
self.emit_ins(X86Instruction::lea(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_OTHER_SCRATCH, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult)))));
+        self.emit_ins(X86Instruction::store_immediate(OperandSize::S64, REGISTER_OTHER_SCRATCH, X86IndirectAccess::Offset(0), err_discriminant as i64)); // result.discriminant = err_discriminant;
+        self.emit_ins(X86Instruction::store_immediate(OperandSize::S64, REGISTER_OTHER_SCRATCH, X86IndirectAccess::Offset(std::mem::size_of::<u64>() as i32), err_kind as i64)); // err.kind = err_kind;
+    }
+
+    fn emit_result_is_err(&mut self, destination: u8) {
+        let ok = ProgramResult::Ok(0);
+        let ok_discriminant = ok.discriminant();
+        self.emit_ins(X86Instruction::lea(OperandSize::S64, REGISTER_PTR_TO_VM, destination, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult)))));
+        self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S64, destination, ok_discriminant as i64, Some(X86IndirectAccess::Offset(0))));
+    }
+
+    fn emit_subroutines(&mut self) {
+        // Routine for instruction tracing
+        if self.config.enable_instruction_tracing {
+            self.set_anchor(ANCHOR_TRACE);
+            // Save registers on stack
+            self.emit_ins(X86Instruction::push(REGISTER_SCRATCH, None));
+            for reg in REGISTER_MAP.iter().rev() {
+                self.emit_ins(X86Instruction::push(*reg, None));
+            }
+            self.emit_ins(X86Instruction::mov(OperandSize::S64, RSP, REGISTER_MAP[0]));
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, -8 * 3, None)); // RSP -= 8 * 3;
+            self.emit_rust_call(Value::Constant64(C::trace as *const u8 as i64, false), &[
+                Argument { index: 1, value: Value::Register(REGISTER_MAP[0]) }, // registers
+                Argument { index: 0, value: Value::RegisterIndirect(REGISTER_PTR_TO_VM, self.slot_in_vm(RuntimeEnvironmentSlot::ContextObjectPointer), false) },
+            ], None);
+            // Pop stack and return
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, 8 * 3, None)); // RSP += 8 * 3;
+            self.emit_ins(X86Instruction::pop(REGISTER_MAP[0]));
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, 8 * (REGISTER_MAP.len() - 1) as i64, None)); // RSP += 8 * (REGISTER_MAP.len() - 1);
+            self.emit_ins(X86Instruction::pop(REGISTER_SCRATCH));
+            self.emit_ins(X86Instruction::return_near());
+        }
+
+        // Epilogue
+        self.set_anchor(ANCHOR_EPILOGUE);
+        if self.config.enable_instruction_meter {
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, 1, None)); // REGISTER_INSTRUCTION_METER -= 1;
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, 0, None)); // REGISTER_INSTRUCTION_METER -= pc;
+            // *DueInsnCount = *PreviousInstructionMeter - REGISTER_INSTRUCTION_METER;
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x2B, REGISTER_INSTRUCTION_METER, REGISTER_PTR_TO_VM, 0, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::PreviousInstructionMeter))))); // REGISTER_INSTRUCTION_METER -= *PreviousInstructionMeter;
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xf7, 3, REGISTER_INSTRUCTION_METER, 0, None)); // REGISTER_INSTRUCTION_METER = -REGISTER_INSTRUCTION_METER;
+            self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_INSTRUCTION_METER, REGISTER_PTR_TO_VM, X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::DueInsnCount)))); // *DueInsnCount = REGISTER_INSTRUCTION_METER;
+        }
+        // Print stop watch value
+        fn stopwatch_result(numerator: u64, denominator: u64) {
+            println!("Stop watch: {} / {} = {}", numerator, denominator, if denominator == 0 { 0.0 } else { numerator as f64 / denominator as f64 });
watch: {} / {} = {}", numerator, denominator, if denominator == 0 { 0.0 } else { numerator as f64 / denominator as f64 }); + } + if self.stopwatch_is_active { + self.emit_rust_call(Value::Constant64(stopwatch_result as *const u8 as i64, false), &[ + Argument { index: 1, value: Value::RegisterIndirect(REGISTER_PTR_TO_VM, self.slot_in_vm(RuntimeEnvironmentSlot::StopwatchDenominator), false) }, + Argument { index: 0, value: Value::RegisterIndirect(REGISTER_PTR_TO_VM, self.slot_in_vm(RuntimeEnvironmentSlot::StopwatchNumerator), false) }, + ], None); + } + // Restore stack pointer in case we did not exit gracefully + self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_PTR_TO_VM, RSP, X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::HostStackPointer)))); + self.emit_ins(X86Instruction::return_near()); + + // Handler for EbpfError::ExceededMaxInstructions + self.set_anchor(ANCHOR_THROW_EXCEEDED_MAX_INSTRUCTIONS); + self.emit_set_exception_kind(EbpfError::ExceededMaxInstructions); + self.emit_ins(X86Instruction::mov(OperandSize::S64, REGISTER_INSTRUCTION_METER, REGISTER_SCRATCH)); // REGISTER_SCRATCH = REGISTER_INSTRUCTION_METER; + // Fall through + + // Epilogue for errors + self.set_anchor(ANCHOR_THROW_EXCEPTION_UNCHECKED); + self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_SCRATCH, REGISTER_PTR_TO_VM, X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::Registers) + 11 * std::mem::size_of::() as i32))); // registers[11] = pc; + self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_EPILOGUE, 5))); + + // Quit gracefully + self.set_anchor(ANCHOR_EXIT); + self.emit_validate_instruction_count(false, None); + self.emit_ins(X86Instruction::lea(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_OTHER_SCRATCH, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult))))); + self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_MAP[0], REGISTER_OTHER_SCRATCH, X86IndirectAccess::Offset(std::mem::size_of::() as i32))); // result.return_value = R0; + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_MAP[0], 0)); + self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_EPILOGUE, 5))); + + // Handler for exceptions which report their pc + self.set_anchor(ANCHOR_THROW_EXCEPTION); + // Validate that we did not reach the instruction meter limit before the exception occured + self.emit_validate_instruction_count(false, None); + self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_THROW_EXCEPTION_UNCHECKED, 5))); + + // Handler for EbpfError::CallDepthExceeded + self.set_anchor(ANCHOR_CALL_DEPTH_EXCEEDED); + self.emit_set_exception_kind(EbpfError::CallDepthExceeded); + self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_THROW_EXCEPTION, 5))); + + // Handler for EbpfError::CallOutsideTextSegment + self.set_anchor(ANCHOR_CALL_OUTSIDE_TEXT_SEGMENT); + self.emit_set_exception_kind(EbpfError::CallOutsideTextSegment); + self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_THROW_EXCEPTION, 5))); + + // Handler for EbpfError::DivideByZero + self.set_anchor(ANCHOR_DIV_BY_ZERO); + self.emit_set_exception_kind(EbpfError::DivideByZero); + self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_THROW_EXCEPTION, 5))); + + // Handler for EbpfError::DivideOverflow + self.set_anchor(ANCHOR_DIV_OVERFLOW); + self.emit_set_exception_kind(EbpfError::DivideOverflow); + 
+        self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_THROW_EXCEPTION, 5)));
+
+        // Handler for EbpfError::UnsupportedInstruction
+        self.set_anchor(ANCHOR_CALL_UNSUPPORTED_INSTRUCTION);
+        if self.config.enable_instruction_tracing {
+            self.emit_ins(X86Instruction::call_immediate(self.relative_to_anchor(ANCHOR_TRACE, 5)));
+        }
+        self.emit_set_exception_kind(EbpfError::UnsupportedInstruction);
+        self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_THROW_EXCEPTION, 5)));
+
+        // Routine for external functions
+        self.set_anchor(ANCHOR_EXTERNAL_FUNCTION_CALL);
+        self.emit_ins(X86Instruction::push_immediate(OperandSize::S64, -1)); // Used as PC value in error case, acts as stack padding otherwise
+        if self.config.enable_instruction_meter {
+            self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_INSTRUCTION_METER, REGISTER_PTR_TO_VM, X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::DueInsnCount)))); // *DueInsnCount = REGISTER_INSTRUCTION_METER;
+        }
+        self.emit_rust_call(Value::Register(REGISTER_SCRATCH), &[
+            Argument { index: 5, value: Value::Register(ARGUMENT_REGISTERS[5]) },
+            Argument { index: 4, value: Value::Register(ARGUMENT_REGISTERS[4]) },
+            Argument { index: 3, value: Value::Register(ARGUMENT_REGISTERS[3]) },
+            Argument { index: 2, value: Value::Register(ARGUMENT_REGISTERS[2]) },
+            Argument { index: 1, value: Value::Register(ARGUMENT_REGISTERS[1]) },
+            Argument { index: 0, value: Value::Register(REGISTER_PTR_TO_VM) },
+        ], None);
+        if self.config.enable_instruction_meter {
+            self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_INSTRUCTION_METER, X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::PreviousInstructionMeter)))); // REGISTER_INSTRUCTION_METER = *PreviousInstructionMeter;
+        }
+
+        // Test if result indicates that an error occurred
+        self.emit_result_is_err(REGISTER_SCRATCH);
+        self.emit_ins(X86Instruction::pop(REGISTER_SCRATCH));
+        self.emit_ins(X86Instruction::conditional_jump_immediate(0x85, self.relative_to_anchor(ANCHOR_EPILOGUE, 6)));
+        // Store Ok value in result register
+        self.emit_ins(X86Instruction::lea(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_SCRATCH, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult)))));
+        self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_SCRATCH, REGISTER_MAP[0], X86IndirectAccess::Offset(8)));
+        self.emit_ins(X86Instruction::return_near());
+
+        // Routine for prologue of emit_internal_call()
+        self.set_anchor(ANCHOR_ANCHOR_INTERNAL_FUNCTION_CALL_PROLOGUE);
+        self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, RSP, 8 * (SCRATCH_REGS + 1) as i64, None)); // alloca
+        self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_SCRATCH, RSP, X86IndirectAccess::OffsetIndexShift(0, RSP, 0))); // Save original REGISTER_SCRATCH
+        self.emit_ins(X86Instruction::load(OperandSize::S64, RSP, REGISTER_SCRATCH, X86IndirectAccess::OffsetIndexShift(8 * (SCRATCH_REGS + 1) as i32, RSP, 0))); // Load return address
+        for (i, reg) in REGISTER_MAP.iter().skip(FIRST_SCRATCH_REG).take(SCRATCH_REGS).enumerate() {
+            self.emit_ins(X86Instruction::store(OperandSize::S64, *reg, RSP, X86IndirectAccess::OffsetIndexShift(8 * (SCRATCH_REGS - i + 1) as i32, RSP, 0))); // Push SCRATCH_REG
+        }
+        // Push the caller's frame pointer. The code to restore it is emitted at the end of emit_internal_call().
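+        // Frame layout sketch (comment added for clarity, offsets relative to RSP after the
+        // alloca above): [RSP + 8 * (SCRATCH_REGS + 1)] held the return address and is reused
+        // for the first saved scratch register, the remaining scratch registers follow below
+        // it, [RSP + 8] receives the caller's frame pointer via the store below, and
+        // [RSP + 0] temporarily holds the original REGISTER_SCRATCH until the xchg swaps the
+        // return address back into place for the final ret.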
+ self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_MAP[FRAME_PTR_REG], RSP, X86IndirectAccess::OffsetIndexShift(8, RSP, 0))); + self.emit_ins(X86Instruction::xchg(OperandSize::S64, REGISTER_SCRATCH, RSP, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); // Push return address and restore original REGISTER_SCRATCH + + // Increase CallDepth + let call_depth_access = X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::CallDepth)); + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_PTR_TO_VM, 1, Some(call_depth_access))); + self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_MAP[FRAME_PTR_REG], call_depth_access)); + // If CallDepth == self.config.max_call_depth, stop and return CallDepthExceeded + self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S32, REGISTER_MAP[FRAME_PTR_REG], self.config.max_call_depth as i64, None)); + self.emit_ins(X86Instruction::conditional_jump_immediate(0x83, self.relative_to_anchor(ANCHOR_CALL_DEPTH_EXCEEDED, 6))); + + // Setup the frame pointer for the new frame. What we do depends on whether we're using dynamic or fixed frames. + let stack_pointer_access = X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::StackPointer)); + if !self.executable.get_sbpf_version().dynamic_stack_frames() { + // With fixed frames we start the new frame at the next fixed offset + let stack_frame_size = self.config.stack_frame_size as i64 * if self.config.enable_stack_frame_gaps { 2 } else { 1 }; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_PTR_TO_VM, stack_frame_size, Some(stack_pointer_access))); // env.stack_pointer += stack_frame_size; + } + self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_MAP[FRAME_PTR_REG], stack_pointer_access)); // reg[ebpf::FRAME_PTR_REG] = env.stack_pointer; + self.emit_ins(X86Instruction::return_near()); + + // Routine for emit_internal_call(Value::Register()) + self.set_anchor(ANCHOR_ANCHOR_INTERNAL_FUNCTION_CALL_REG); + // Calculate offset relative to instruction_addresses + self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_MAP[FRAME_PTR_REG], self.program_vm_addr as i64)); + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, REGISTER_MAP[FRAME_PTR_REG], REGISTER_MAP[0], 0, None)); // RAX -= self.program_vm_addr; + // Force alignment of RAX + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 4, REGISTER_MAP[0], !(INSN_SIZE as i64 - 1), None)); // RAX &= !(INSN_SIZE - 1); + // Bound check + // if(RAX >= number_of_instructions * INSN_SIZE) throw CALL_OUTSIDE_TEXT_SEGMENT; + let number_of_instructions = self.result.pc_section.len(); + self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S64, REGISTER_MAP[0], (number_of_instructions * INSN_SIZE) as i64, None)); + self.emit_ins(X86Instruction::conditional_jump_immediate(0x83, self.relative_to_anchor(ANCHOR_CALL_OUTSIDE_TEXT_SEGMENT, 6))); + // Calculate the target_pc (dst / INSN_SIZE) to update REGISTER_INSTRUCTION_METER + // and as target pc for potential ANCHOR_CALL_UNSUPPORTED_INSTRUCTION + let shift_amount = INSN_SIZE.trailing_zeros(); + debug_assert_eq!(INSN_SIZE, 1 << shift_amount); + self.emit_ins(X86Instruction::mov(OperandSize::S64, REGISTER_MAP[0], REGISTER_SCRATCH)); + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xc1, 5, REGISTER_SCRATCH, shift_amount as i64, None)); + // Load host target_address from self.result.pc_section + debug_assert_eq!(INSN_SIZE, 8); // Because the instruction size 
+        // Load host target_address from self.result.pc_section
+        debug_assert_eq!(INSN_SIZE, 8); // Because the instruction size is also the slot size we do not need to shift the offset
+        self.emit_ins(X86Instruction::load_immediate(OperandSize::S64, REGISTER_MAP[FRAME_PTR_REG], self.result.pc_section.as_ptr() as i64));
+        self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_MAP[FRAME_PTR_REG], REGISTER_MAP[0], 0, None)); // RAX += self.result.pc_section;
+        self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_MAP[0], REGISTER_MAP[0], X86IndirectAccess::Offset(0))); // RAX = self.result.pc_section[RAX / 8];
+        // Load the frame pointer again since we've clobbered REGISTER_MAP[FRAME_PTR_REG]
+        self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_MAP[FRAME_PTR_REG], stack_pointer_access));
+        self.emit_ins(X86Instruction::return_near());
+
+        // Translates a vm memory address to a host memory address
+        for (access_type, len) in &[
+            (AccessType::Load, 1i32),
+            (AccessType::Load, 2i32),
+            (AccessType::Load, 4i32),
+            (AccessType::Load, 8i32),
+            (AccessType::Store, 1i32),
+            (AccessType::Store, 2i32),
+            (AccessType::Store, 4i32),
+            (AccessType::Store, 8i32),
+        ] {
+            let target_offset = len.trailing_zeros() as usize + 4 * (*access_type as usize);
+            self.set_anchor(ANCHOR_TRANSLATE_MEMORY_ADDRESS + target_offset);
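+            // [Illustrative sketch, not part of the original patch] target_offset
+            // packs (access size, access type) into 0..=7: sizes 1/2/4/8 map to
+            // 0..=3 via trailing_zeros(), and stores are shifted up by 4, so e.g.
+            // a 4-byte store lands at ANCHOR_TRANSLATE_MEMORY_ADDRESS + 6.
+            #[allow(clippy::arithmetic_side_effects)]
+            fn _sketch_target_offset(len: i32, is_store: bool) -> usize {
+                len.trailing_zeros() as usize + if is_store { 4 } else { 0 }
+            }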
+            // call MemoryMapping::(load|store) storing the result in RuntimeEnvironmentSlot::ProgramResult
+            if *access_type == AccessType::Load {
+                let load = match len {
+                    1 => MemoryMapping::load::<u8> as *const u8 as i64,
+                    2 => MemoryMapping::load::<u16> as *const u8 as i64,
+                    4 => MemoryMapping::load::<u32> as *const u8 as i64,
+                    8 => MemoryMapping::load::<u64> as *const u8 as i64,
+                    _ => unreachable!()
+                };
+                self.emit_rust_call(Value::Constant64(load, false), &[
+                    Argument { index: 2, value: Value::Register(REGISTER_SCRATCH) }, // Specify first as the src register could be overwritten by other arguments
+                    Argument { index: 3, value: Value::Constant64(0, false) }, // self.pc is set later
+                    Argument { index: 1, value: Value::RegisterPlusConstant32(REGISTER_PTR_TO_VM, self.slot_in_vm(RuntimeEnvironmentSlot::MemoryMapping), false) },
+                    Argument { index: 0, value: Value::RegisterPlusConstant32(REGISTER_PTR_TO_VM, self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult), false) },
+                ], None);
+            } else {
+                let store = match len {
+                    1 => MemoryMapping::store::<u8> as *const u8 as i64,
+                    2 => MemoryMapping::store::<u16> as *const u8 as i64,
+                    4 => MemoryMapping::store::<u32> as *const u8 as i64,
+                    8 => MemoryMapping::store::<u64> as *const u8 as i64,
+                    _ => unreachable!()
+                };
+                self.emit_rust_call(Value::Constant64(store, false), &[
+                    Argument { index: 3, value: Value::Register(REGISTER_SCRATCH) }, // Specify first as the src register could be overwritten by other arguments
+                    Argument { index: 2, value: Value::Register(REGISTER_OTHER_SCRATCH) },
+                    Argument { index: 4, value: Value::Constant64(0, false) }, // self.pc is set later
+                    Argument { index: 1, value: Value::RegisterPlusConstant32(REGISTER_PTR_TO_VM, self.slot_in_vm(RuntimeEnvironmentSlot::MemoryMapping), false) },
+                    Argument { index: 0, value: Value::RegisterPlusConstant32(REGISTER_PTR_TO_VM, self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult), false) },
+                ], None);
+            }
+
+            // Throw error if the result indicates one
+            self.emit_result_is_err(REGISTER_SCRATCH);
+            self.emit_ins(X86Instruction::pop(REGISTER_SCRATCH)); // REGISTER_SCRATCH = self.pc
+            self.emit_ins(X86Instruction::xchg(OperandSize::S64, REGISTER_SCRATCH, RSP, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); // Swap return address and self.pc
+            self.emit_ins(X86Instruction::conditional_jump_immediate(0x85, self.relative_to_anchor(ANCHOR_THROW_EXCEPTION, 6)));
+
+            // unwrap() the result into REGISTER_SCRATCH
+            self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_SCRATCH, X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult) + std::mem::size_of::<u64>() as i32)));
+
+            self.emit_ins(X86Instruction::return_near());
+        }
+    }
+
+    fn set_anchor(&mut self, anchor: usize) {
+        self.anchors[anchor] = unsafe { self.result.text_section.as_ptr().add(self.offset_in_text_section) };
+    }
+
+    // instruction_length = 5 (Unconditional jump / call)
+    // instruction_length = 6 (Conditional jump)
+    #[inline]
+    fn relative_to_anchor(&self, anchor: usize, instruction_length: usize) -> i32 {
+        let instruction_end = unsafe { self.result.text_section.as_ptr().add(self.offset_in_text_section).add(instruction_length) };
+        let destination = self.anchors[anchor];
+        debug_assert!(!destination.is_null());
+        (unsafe { destination.offset_from(instruction_end) } as i32) // Relative jump
+    }
+
+    #[inline]
+    fn relative_to_target_pc(&mut self, target_pc: usize, instruction_length: usize) -> i32 {
+        let instruction_end = unsafe { self.result.text_section.as_ptr().add(self.offset_in_text_section).add(instruction_length) };
+        let destination = if self.result.pc_section[target_pc] != 0 {
+            // Backward jump
+            self.result.pc_section[target_pc] as *const u8
+        } else {
+            // Forward jump, needs relocation
+            self.text_section_jumps.push(Jump { location: unsafe { instruction_end.sub(4) }, target_pc });
+            return 0;
+        };
+        debug_assert!(!destination.is_null());
+        (unsafe { destination.offset_from(instruction_end) } as i32) // Relative jump
+    }
+
+    fn resolve_jumps(&mut self) {
+        // Relocate forward jumps
+        for jump in &self.text_section_jumps {
+            let destination = self.result.pc_section[jump.target_pc] as *const u8;
+            let offset_value =
+                unsafe { destination.offset_from(jump.location) } as i32 // Relative jump
+                - mem::size_of::<i32>() as i32; // Jump from end of instruction
+            unsafe { ptr::write_unaligned(jump.location as *mut i32, offset_value); }
+        }
+        // There is no `VerifierError::JumpToMiddleOfLDDW` for `call imm` so patch it here
+        let call_unsupported_instruction = self.anchors[ANCHOR_CALL_UNSUPPORTED_INSTRUCTION] as usize;
+        if self.executable.get_sbpf_version().static_syscalls() {
+            let mut prev_pc = 0;
+            for current_pc in self.executable.get_function_registry().keys() {
+                if current_pc as usize >= self.result.pc_section.len() {
+                    break;
+                }
+                for pc in prev_pc..current_pc as usize {
+                    self.result.pc_section[pc] = call_unsupported_instruction;
+                }
+                prev_pc = current_pc as usize + 1;
+            }
+            for pc in prev_pc..self.result.pc_section.len() {
+                self.result.pc_section[pc] = call_unsupported_instruction;
+            }
+        }
+    }
+}
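+
+// [Illustrative sketch, not part of the original patch] resolve_jumps() above
+// patches each recorded 4-byte rel32 slot so that
+// `destination == end_of_instruction + rel32`; since the slot occupies the
+// last 4 bytes of the instruction, the written value is:
+#[allow(dead_code, clippy::arithmetic_side_effects)]
+fn _sketch_rel32(slot_location: usize, destination: usize) -> i32 {
+    (destination as isize - (slot_location as isize + std::mem::size_of::<i32>() as isize)) as i32
+}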
check_slot { + ($env:expr, $entry:ident, $slot:ident) => { + assert_eq!( + unsafe { + (&$env.$entry as *const _ as *const u64) + .offset_from(&$env as *const _ as *const u64) as usize + }, + RuntimeEnvironmentSlot::$slot as usize, + ); + }; + } + + check_slot!(env, host_stack_pointer, HostStackPointer); + check_slot!(env, call_depth, CallDepth); + check_slot!(env, stack_pointer, StackPointer); + check_slot!(env, context_object_pointer, ContextObjectPointer); + check_slot!(env, previous_instruction_meter, PreviousInstructionMeter); + check_slot!(env, due_insn_count, DueInsnCount); + check_slot!(env, stopwatch_numerator, StopwatchNumerator); + check_slot!(env, stopwatch_denominator, StopwatchDenominator); + check_slot!(env, registers, Registers); + check_slot!(env, program_result, ProgramResult); + check_slot!(env, memory_mapping, MemoryMapping); + } + + fn create_mockup_executable(program: &[u8]) -> Executable { + let mut function_registry = + FunctionRegistry::>::default(); + function_registry + .register_function_hashed(*b"gather_bytes", syscalls::SyscallGatherBytes::vm) + .unwrap(); + let loader = BuiltinProgram::new_loader( + Config { + noop_instruction_rate: 0, + ..Config::default() + }, + function_registry, + ); + let mut function_registry = FunctionRegistry::default(); + function_registry + .register_function(8, *b"function_foo", 8) + .unwrap(); + Executable::::from_text_bytes( + program, + Arc::new(loader), + SBPFVersion::V2, + function_registry, + ) + .unwrap() + } + + #[test] + fn test_code_length_estimate() { + const INSTRUCTION_COUNT: usize = 256; + let mut prog = [0; ebpf::INSN_SIZE * INSTRUCTION_COUNT]; + + let empty_program_machine_code_length = { + prog[0] = ebpf::EXIT; + let mut executable = create_mockup_executable(&prog[0..ebpf::INSN_SIZE]); + Executable::::jit_compile(&mut executable).unwrap(); + executable + .get_compiled_program() + .unwrap() + .machine_code_length() + }; + assert!(empty_program_machine_code_length <= MAX_EMPTY_PROGRAM_MACHINE_CODE_LENGTH); + + for mut opcode in 0x00..=0xFF { + let (registers, immediate) = match opcode { + 0x85 | 0x8D => (0x88, 8), + 0x86 => { + // Put external function calls on a separate loop iteration + opcode = 0x85; + (0x00, 0x91020CDD) + } + 0x87 => { + // Put invalid function calls on a separate loop iteration + opcode = 0x85; + (0x88, 0x91020CDD) + } + 0xD4 | 0xDC => (0x88, 16), + _ => (0x88, 0xFFFFFFFF), + }; + for pc in 0..INSTRUCTION_COUNT { + prog[pc * ebpf::INSN_SIZE] = opcode; + prog[pc * ebpf::INSN_SIZE + 1] = registers; + prog[pc * ebpf::INSN_SIZE + 2] = 0xFF; + prog[pc * ebpf::INSN_SIZE + 3] = 0xFF; + LittleEndian::write_u32(&mut prog[pc * ebpf::INSN_SIZE + 4..], immediate); + } + let mut executable = create_mockup_executable(&prog); + let result = Executable::::jit_compile(&mut executable); + if result.is_err() { + assert!(matches!( + result.unwrap_err(), + EbpfError::UnsupportedInstruction + )); + continue; + } + let machine_code_length = executable + .get_compiled_program() + .unwrap() + .machine_code_length() + - empty_program_machine_code_length; + let instruction_count = if opcode == 0x18 { + // LDDW takes two slots + INSTRUCTION_COUNT / 2 + } else { + INSTRUCTION_COUNT + }; + let machine_code_length_per_instruction = + (machine_code_length as f64 / instruction_count as f64 + 0.5) as usize; + assert!(machine_code_length_per_instruction <= MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION); + /*println!("opcode={:02X} machine_code_length_per_instruction={}", opcode, machine_code_length_per_instruction); + let analysis = 
+            /*println!("opcode={:02X} machine_code_length_per_instruction={}", opcode, machine_code_length_per_instruction);
+            let analysis = crate::static_analysis::Analysis::from_executable(&executable).unwrap();
+            {
+                let stdout = std::io::stdout();
+                analysis.disassemble(&mut stdout.lock()).unwrap();
+            }*/
+        }
+    }
+}
diff --git a/rbpf/src/lib.rs b/rbpf/src/lib.rs
new file mode 100644
index 00000000000000..ae4c5652b6ee87
--- /dev/null
+++ b/rbpf/src/lib.rs
@@ -0,0 +1,86 @@
+// Derived from uBPF <https://github.com/iovisor/ubpf>
+// Copyright 2015 Big Switch Networks, Inc
+//      (uBPF: VM architecture, parts of the interpreter, originally in C)
+// Copyright 2016 6WIND S.A.
+//      (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for syscalls)
+// Copyright 2020 Solana Maintainers <maintainers@solana.foundation>
+//
+// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
+// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! Virtual machine and JIT compiler for eBPF programs.
+#![warn(missing_docs)]
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/qmonnet/rbpf/master/misc/rbpf.png",
+    html_favicon_url = "https://raw.githubusercontent.com/qmonnet/rbpf/master/misc/rbpf.ico"
+)]
+#![deny(clippy::arithmetic_side_effects)]
+
+extern crate byteorder;
+extern crate combine;
+extern crate hash32;
+extern crate log;
+extern crate rand;
+extern crate thiserror;
+
+pub mod aligned_memory;
+mod asm_parser;
+pub mod assembler;
+#[cfg(feature = "debugger")]
+pub mod debugger;
+pub mod disassembler;
+pub mod ebpf;
+pub mod elf;
+pub mod elf_parser;
+pub mod elf_parser_glue;
+pub mod error;
+pub mod fuzz;
+pub mod insn_builder;
+pub mod interpreter;
+#[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
+mod jit;
+#[cfg(feature = "jit")]
+mod memory_management;
+pub mod memory_region;
+pub mod program;
+pub mod static_analysis;
+pub mod syscalls;
+pub mod verifier;
+pub mod vm;
+#[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
+mod x86;
+
+trait ErrCheckedArithmetic: Sized {
+    fn err_checked_add(self, other: Self) -> Result<Self, ArithmeticOverflow>;
+    fn err_checked_sub(self, other: Self) -> Result<Self, ArithmeticOverflow>;
+    fn err_checked_mul(self, other: Self) -> Result<Self, ArithmeticOverflow>;
+    fn err_checked_div(self, other: Self) -> Result<Self, ArithmeticOverflow>;
+}
+struct ArithmeticOverflow;
+
+macro_rules! impl_err_checked_arithmetic {
+    ($($ty:ty),*) => {
+        $(
+            impl ErrCheckedArithmetic for $ty {
+                fn err_checked_add(self, other: $ty) -> Result<Self, ArithmeticOverflow> {
+                    self.checked_add(other).ok_or(ArithmeticOverflow)
+                }
+
+                fn err_checked_sub(self, other: $ty) -> Result<Self, ArithmeticOverflow> {
+                    self.checked_sub(other).ok_or(ArithmeticOverflow)
+                }
+
+                fn err_checked_mul(self, other: $ty) -> Result<Self, ArithmeticOverflow> {
+                    self.checked_mul(other).ok_or(ArithmeticOverflow)
+                }
+
+                fn err_checked_div(self, other: $ty) -> Result<Self, ArithmeticOverflow> {
+                    self.checked_div(other).ok_or(ArithmeticOverflow)
+                }
+            }
+        )*
+    }
+}
+
+impl_err_checked_arithmetic!(i8, i16, i32, i64, i128, isize, u8, u16, u32, u64, u128, usize);
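+
+// [Illustrative sketch, not part of the original patch] How the crate-private
+// trait reads at a call site: the checked ops surface ArithmeticOverflow as an
+// Err value instead of panicking or wrapping.
+#[cfg(test)]
+mod err_checked_arithmetic_sketch {
+    use super::{ArithmeticOverflow, ErrCheckedArithmetic};
+
+    #[test]
+    fn overflow_becomes_err() {
+        assert!(matches!(u8::MAX.err_checked_add(1), Err(ArithmeticOverflow)));
+        assert!(matches!(1u8.err_checked_div(0), Err(ArithmeticOverflow)));
+        assert!(matches!(250u8.err_checked_add(5), Ok(255)));
+    }
+}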
diff --git a/rbpf/src/memory_management.rs b/rbpf/src/memory_management.rs
new file mode 100644
index 00000000000000..9c7bac5e06c7d9
--- /dev/null
+++ b/rbpf/src/memory_management.rs
@@ -0,0 +1,167 @@
+// Copyright 2022 Solana Maintainers <maintainers@solana.foundation>
+//
+// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
+// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+#![cfg_attr(target_os = "windows", allow(dead_code))]
+
+use crate::error::EbpfError;
+
+#[cfg(not(target_os = "windows"))]
+extern crate libc;
+#[cfg(not(target_os = "windows"))]
+use libc::c_void;
+
+#[cfg(target_os = "windows")]
+use winapi::{
+    ctypes::c_void,
+    shared::minwindef,
+    um::{
+        errhandlingapi::GetLastError,
+        memoryapi::{VirtualAlloc, VirtualFree, VirtualProtect},
+        sysinfoapi::{GetSystemInfo, SYSTEM_INFO},
+        winnt,
+    },
+};
+
+// Retries a libc call up to RETRY_COUNT times and converts a persistent failure
+// into EbpfError::LibcInvocationFailed, carrying the stringified arguments and errno.
+#[cfg(not(target_os = "windows"))]
+macro_rules! libc_error_guard {
+    (succeeded?, mmap, $addr:expr, $($arg:expr),*) => {{
+        *$addr = libc::mmap(*$addr, $($arg),*);
+        *$addr != libc::MAP_FAILED
+    }};
+    (succeeded?, $function:ident, $($arg:expr),*) => {
+        libc::$function($($arg),*) == 0
+    };
+    ($function:ident, $($arg:expr),* $(,)?) => {{
+        const RETRY_COUNT: usize = 3;
+        for i in 0..RETRY_COUNT {
+            if libc_error_guard!(succeeded?, $function, $($arg),*) {
+                break;
+            } else if i.saturating_add(1) == RETRY_COUNT {
+                let args = vec![$(format!("{:?}", $arg)),*];
+                #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))]
+                let errno = *libc::__error();
+                #[cfg(any(target_os = "android", target_os = "netbsd", target_os = "openbsd"))]
+                let errno = *libc::__errno();
+                #[cfg(target_os = "linux")]
+                let errno = *libc::__errno_location();
+                return Err(EbpfError::LibcInvocationFailed(stringify!($function), args, errno));
+            }
+        }
+    }};
+}
+
+#[cfg(target_os = "windows")]
+macro_rules! winapi_error_guard {
+    (succeeded?, VirtualAlloc, $addr:expr, $($arg:expr),*) => {{
+        *$addr = VirtualAlloc(*$addr, $($arg),*);
+        !(*$addr).is_null()
+    }};
+    (succeeded?, $function:ident, $($arg:expr),*) => {
+        $function($($arg),*) != 0
+    };
+    ($function:ident, $($arg:expr),* $(,)?) => {{
+        if !winapi_error_guard!(succeeded?, $function, $($arg),*) {
+            let args = vec![$(format!("{:?}", $arg)),*];
+            let errno = GetLastError();
+            return Err(EbpfError::LibcInvocationFailed(stringify!($function), args, errno as i32));
+        }
+    }};
+}
+
+pub fn get_system_page_size() -> usize {
+    #[cfg(not(target_os = "windows"))]
+    unsafe {
+        libc::sysconf(libc::_SC_PAGESIZE) as usize
+    }
+    #[cfg(target_os = "windows")]
+    unsafe {
+        let mut system_info: SYSTEM_INFO = std::mem::zeroed();
+        GetSystemInfo(&mut system_info);
+        system_info.dwPageSize as usize
+    }
+}
+
+pub fn round_to_page_size(value: usize, page_size: usize) -> usize {
+    value
+        .saturating_add(page_size)
+        .saturating_sub(1)
+        .checked_div(page_size)
+        .unwrap()
+        .saturating_mul(page_size)
+}
+
+pub unsafe fn allocate_pages(size_in_bytes: usize) -> Result<*mut u8, EbpfError> {
+    let mut raw: *mut c_void = std::ptr::null_mut();
+    #[cfg(not(target_os = "windows"))]
+    libc_error_guard!(
+        mmap,
+        &mut raw,
+        size_in_bytes,
+        libc::PROT_READ | libc::PROT_WRITE,
+        libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
+        -1,
+        0,
+    );
+    #[cfg(target_os = "windows")]
+    winapi_error_guard!(
+        VirtualAlloc,
+        &mut raw,
+        size_in_bytes,
+        winnt::MEM_RESERVE | winnt::MEM_COMMIT,
+        winnt::PAGE_READWRITE,
+    );
+    Ok(raw as *mut u8)
+}
+
+pub unsafe fn free_pages(raw: *mut u8, size_in_bytes: usize) -> Result<(), EbpfError> {
+    #[cfg(not(target_os = "windows"))]
+    libc_error_guard!(munmap, raw as *mut _, size_in_bytes);
+    #[cfg(target_os = "windows")]
+    winapi_error_guard!(
+        VirtualFree,
+        raw as *mut _,
+        size_in_bytes,
+        winnt::MEM_RELEASE, // winnt::MEM_DECOMMIT
+    );
+    Ok(())
+}
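+
+// [Illustrative sketch, not part of the original patch] Typical W^X lifecycle
+// of a JIT buffer built from the helpers in this module: allocate read-write
+// pages, copy the emitted machine code in, flip the pages to read-execute
+// before running, and unmap afterwards.
+#[allow(dead_code)]
+unsafe fn _sketch_wx_lifecycle(code: &[u8]) -> Result<(), EbpfError> {
+    let size_in_bytes = round_to_page_size(code.len(), get_system_page_size());
+    let raw = allocate_pages(size_in_bytes)?; // readable + writable
+    std::ptr::copy_nonoverlapping(code.as_ptr(), raw, code.len());
+    protect_pages(raw, size_in_bytes, true)?; // now readable + executable
+    free_pages(raw, size_in_bytes)
+}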
+
+pub unsafe fn protect_pages(
+    raw: *mut u8,
+    size_in_bytes: usize,
+    executable_flag: bool,
+) -> Result<(), EbpfError> {
+    #[cfg(not(target_os = "windows"))]
+    {
+        libc_error_guard!(
+            mprotect,
+            raw as *mut _,
+            size_in_bytes,
+            if executable_flag {
+                libc::PROT_EXEC | libc::PROT_READ
+            } else {
+                libc::PROT_READ
+            },
+        );
+    }
+    #[cfg(target_os = "windows")]
+    {
+        let mut old: minwindef::DWORD = 0;
+        let ptr_old: *mut minwindef::DWORD = &mut old;
+        winapi_error_guard!(
+            VirtualProtect,
+            raw as *mut _,
+            size_in_bytes,
+            if executable_flag {
+                winnt::PAGE_EXECUTE_READ
+            } else {
+                winnt::PAGE_READONLY
+            },
+            ptr_old,
+        );
+    }
+    Ok(())
+}
diff --git a/rbpf/src/memory_region.rs b/rbpf/src/memory_region.rs
new file mode 100644
index 00000000000000..260596ccb85075
--- /dev/null
+++ b/rbpf/src/memory_region.rs
@@ -0,0 +1,1822 @@
+//! This module defines memory regions
+
+use crate::{
+    aligned_memory::Pod,
+    ebpf,
+    error::{EbpfError, ProgramResult},
+    program::SBPFVersion,
+    vm::Config,
+};
+use std::{
+    array,
+    cell::{Cell, UnsafeCell},
+    fmt, mem,
+    ops::Range,
+    ptr::{self, copy_nonoverlapping},
+};
+
+/* Explanation of the Gapped Memory
+
+    The MemoryMapping supports a special mapping mode which is used for the stack MemoryRegion.
+    In this mode the backing address space of the host is sliced in power-of-two aligned frames.
+    The exponent of this alignment is specified in vm_gap_shift. Then the virtual address space
+    of the guest is spread out in a way which leaves gaps, the same size as the frames, in
+    between the frames. This effectively doubles the size of the guest's virtual address space.
+    But the actual mapped memory stays the same, as the gaps are not mapped and accessing them
+    results in an AccessViolation.
+
+    Guest: frame 0 | gap 0 | frame 1 | gap 1 | frame 2 | gap 2 | ...
+                   |         /                /
+                   |  *-----*  *-------------*
+                   | /        /
+    Host:  frame 0 | frame 1 | frame 2 | ...
+*/
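+
+// [Illustrative sketch, not part of the original patch] With 4 KiB frames
+// (an assumed vm_gap_shift of 12), a guest offset whose frame-selector bit is
+// set falls in a gap and faults; otherwise the offset is compacted by dropping
+// that bit, which is exactly the math vm_to_host() performs below.
+#[allow(dead_code)]
+#[allow(clippy::arithmetic_side_effects)]
+fn _sketch_gapped_guest_to_host_offset(guest_offset: u64) -> Option<u64> {
+    const GAP_SHIFT: u32 = 12;
+    if (guest_offset >> GAP_SHIFT) & 1 == 1 {
+        return None; // inside a gap -> AccessViolation
+    }
+    let gap_mask = ((-1i64) << GAP_SHIFT) as u64;
+    // e.g. guest offset 0x2000 (frame 1) compacts to host offset 0x1000
+    Some(((guest_offset & gap_mask) >> 1) | (guest_offset & !gap_mask))
+}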
+
+/// The state of a memory region.
+#[derive(Debug, Copy, Clone, Default, Eq, PartialEq)]
+pub enum MemoryState {
+    /// The memory region is readable
+    #[default]
+    Readable,
+    /// The memory region is writable
+    Writable,
+    /// The memory region is writable but must be copied before writing. The
+    /// carried data can be used to uniquely identify the region.
+    Cow(u64),
+}
+
+/// Callback executed when a CoW memory region is written to
+pub type MemoryCowCallback = Box<dyn Fn(u64) -> Result<u64, ()>>;
+
+/// Memory region for bounds checking and address translation
+#[derive(Default, Eq, PartialEq)]
+#[repr(C, align(32))]
+pub struct MemoryRegion {
+    /// start host address
+    pub host_addr: Cell<u64>,
+    /// start virtual address
+    pub vm_addr: u64,
+    /// end virtual address
+    pub vm_addr_end: u64,
+    /// Length in bytes
+    pub len: u64,
+    /// Size of regular gaps as bit shift (63 means this region is continuous)
+    pub vm_gap_shift: u8,
+    /// Whether the region is readonly, writable or must be copied before writing
+    pub state: Cell<MemoryState>,
+}
+
+impl MemoryRegion {
+    fn new(slice: &[u8], vm_addr: u64, vm_gap_size: u64, state: MemoryState) -> Self {
+        let mut vm_addr_end = vm_addr.saturating_add(slice.len() as u64);
+        let mut vm_gap_shift = (std::mem::size_of::<u64>() as u8)
+            .saturating_mul(8)
+            .saturating_sub(1);
+        if vm_gap_size > 0 {
+            vm_addr_end = vm_addr_end.saturating_add(slice.len() as u64);
+            vm_gap_shift = vm_gap_shift.saturating_sub(vm_gap_size.leading_zeros() as u8);
+            debug_assert_eq!(Some(vm_gap_size), 1_u64.checked_shl(vm_gap_shift as u32));
+        };
+        MemoryRegion {
+            host_addr: Cell::new(slice.as_ptr() as u64),
+            vm_addr,
+            vm_addr_end,
+            len: slice.len() as u64,
+            vm_gap_shift,
+            state: Cell::new(state),
+        }
+    }
+
+    /// Only to be used in tests and benches
+    pub fn new_for_testing(
+        slice: &[u8],
+        vm_addr: u64,
+        vm_gap_size: u64,
+        state: MemoryState,
+    ) -> Self {
+        Self::new(slice, vm_addr, vm_gap_size, state)
+    }
+
+    /// Creates a new readonly MemoryRegion from a slice
+    pub fn new_readonly(slice: &[u8], vm_addr: u64) -> Self {
+        Self::new(slice, vm_addr, 0, MemoryState::Readable)
+    }
+
+    /// Creates a new writable MemoryRegion from a mutable slice
+    pub fn new_writable(slice: &mut [u8], vm_addr: u64) -> Self {
+        Self::new(
+            unsafe { std::mem::transmute::<&mut [u8], &[u8]>(slice) },
+            vm_addr,
+            0,
+            MemoryState::Writable,
+        )
+    }
+
+    /// Creates a new copy on write MemoryRegion.
+    ///
+    /// The region is made writable
+    pub fn new_cow(slice: &[u8], vm_addr: u64, cow_id: u64) -> Self {
+        Self::new(slice, vm_addr, 0, MemoryState::Cow(cow_id))
+    }
+
+    /// Creates a new writable gapped MemoryRegion from a mutable slice
+    pub fn new_writable_gapped(slice: &mut [u8], vm_addr: u64, vm_gap_size: u64) -> Self {
+        Self::new(
+            unsafe { std::mem::transmute::<&mut [u8], &[u8]>(slice) },
+            vm_addr,
+            vm_gap_size,
+            MemoryState::Writable,
+        )
+    }
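+
+    /// [Illustrative sketch, not part of the original patch] Building the kind
+    /// of callback `MemoryMapping::new_with_cow` expects for a region created
+    /// with `new_cow`: on the first write, copy the data somewhere writable
+    /// (a leaked Vec here, purely for illustration) and return its host address.
+    #[allow(dead_code)]
+    fn _sketch_cow_callback(original: &'static [u8]) -> MemoryCowCallback {
+        Box::new(move |_cow_id| {
+            let copy: &'static mut [u8] = Vec::from(original).leak();
+            Ok(copy.as_mut_ptr() as u64)
+        })
+    }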
+
+    /// Convert a virtual machine address into a host address
+    pub fn vm_to_host(&self, vm_addr: u64, len: u64) -> ProgramResult {
+        // This can happen if a region starts at an offset from the base region
+        // address, eg with rodata regions if config.optimize_rodata = true, see
+        // Elf::get_ro_region.
+        if vm_addr < self.vm_addr {
+            return ProgramResult::Err(EbpfError::InvalidVirtualAddress(vm_addr));
+        }
+
+        let begin_offset = vm_addr.saturating_sub(self.vm_addr);
+        let is_in_gap = (begin_offset
+            .checked_shr(self.vm_gap_shift as u32)
+            .unwrap_or(0)
+            & 1)
+            == 1;
+        let gap_mask = (-1i64).checked_shl(self.vm_gap_shift as u32).unwrap_or(0) as u64;
+        let gapped_offset =
+            (begin_offset & gap_mask).checked_shr(1).unwrap_or(0) | (begin_offset & !gap_mask);
+        if let Some(end_offset) = gapped_offset.checked_add(len) {
+            if end_offset <= self.len && !is_in_gap {
+                return ProgramResult::Ok(self.host_addr.get().saturating_add(gapped_offset));
+            }
+        }
+        ProgramResult::Err(EbpfError::InvalidVirtualAddress(vm_addr))
+    }
+}
+
+impl fmt::Debug for MemoryRegion {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(
+            f,
+            "host_addr: {:#x?}-{:#x?}, vm_addr: {:#x?}-{:#x?}, len: {}",
+            self.host_addr,
+            self.host_addr.get().saturating_add(self.len),
+            self.vm_addr,
+            self.vm_addr_end,
+            self.len
+        )
+    }
+}
+impl std::cmp::PartialOrd for MemoryRegion {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+impl std::cmp::Ord for MemoryRegion {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.vm_addr.cmp(&other.vm_addr)
+    }
+}
+
+/// Type of memory access
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum AccessType {
+    /// Read
+    Load,
+    /// Write
+    Store,
+}
+
+/// Memory mapping based on eytzinger search.
+pub struct UnalignedMemoryMapping<'a> {
+    /// Mapped memory regions
+    regions: Box<[MemoryRegion]>,
+    /// Copy of the regions vm_addr fields to improve cache density
+    region_addresses: Box<[u64]>,
+    /// Cache of the last `MappingCache::SIZE` vm_addr => region_index lookups
+    cache: UnsafeCell<MappingCache>,
+    /// VM configuration
+    config: &'a Config,
+    /// Executable sbpf_version
+    sbpf_version: &'a SBPFVersion,
+    /// CoW callback
+    cow_cb: Option<MemoryCowCallback>,
+}
+
+impl<'a> fmt::Debug for UnalignedMemoryMapping<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("UnalignedMemoryMapping")
+            .field("regions", &self.regions)
+            .field("region_addresses", &self.region_addresses)
+            .field("cache", &self.cache)
+            .field("config", &self.config)
+            .field(
+                "cow_cb",
+                &self
+                    .cow_cb
+                    .as_ref()
+                    .map(|cb| format!("Some({:p})", &cb))
+                    .unwrap_or_else(|| "None".to_string()),
+            )
+            .finish()
+    }
+}
+
+impl<'a> UnalignedMemoryMapping<'a> {
+    fn construct_eytzinger_order(
+        &mut self,
+        ascending_regions: &mut [MemoryRegion],
+        mut in_index: usize,
+        out_index: usize,
+    ) -> usize {
+        if out_index >= self.regions.len() {
+            return in_index;
+        }
+        in_index = self.construct_eytzinger_order(
+            ascending_regions,
+            in_index,
+            out_index.saturating_mul(2).saturating_add(1),
+        );
+        self.regions[out_index] = mem::take(&mut ascending_regions[in_index]);
+        self.region_addresses[out_index] = self.regions[out_index].vm_addr;
+        self.construct_eytzinger_order(
+            ascending_regions,
+            in_index.saturating_add(1),
+            out_index.saturating_mul(2).saturating_add(2),
+        )
+    }
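+
+    // [Illustrative sketch, not part of the original patch] For ascending
+    // region start addresses [a, b, c, d, e, f, g], construct_eytzinger_order()
+    // stores them in BFS order of a balanced tree, [d, b, f, a, c, e, g], so
+    // that find_region() below can descend branchlessly with
+    // `index = (index << 1) + (node <= vm_addr)`:
+    #[allow(dead_code)]
+    #[allow(clippy::arithmetic_side_effects)]
+    fn _sketch_eytzinger_search(region_addresses: &[u64], vm_addr: u64) -> Option<usize> {
+        let mut index = 1;
+        while index <= region_addresses.len() {
+            index = (index << 1) + usize::from(region_addresses[index - 1] <= vm_addr);
+        }
+        index >>= index.trailing_zeros() + 1;
+        if index == 0 {
+            None // vm_addr is below every region start
+        } else {
+            Some(index - 1) // eytzinger index of the candidate region
+        }
+    }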
+
+    fn new_internal(
+        mut regions: Vec<MemoryRegion>,
+        cow_cb: Option<MemoryCowCallback>,
+        config: &'a Config,
+        sbpf_version: &'a SBPFVersion,
+    ) -> Result<Self, EbpfError> {
+        regions.sort();
+        for index in 1..regions.len() {
+            let first = &regions[index.saturating_sub(1)];
+            let second = &regions[index];
+            if first.vm_addr_end > second.vm_addr {
+                return Err(EbpfError::InvalidMemoryRegion(index));
+            }
+        }
+
+        let mut result = Self {
+            regions: (0..regions.len())
+                .map(|_| MemoryRegion::default())
+                .collect::<Vec<_>>()
+                .into_boxed_slice(),
+            region_addresses: vec![0; regions.len()].into_boxed_slice(),
+            cache: UnsafeCell::new(MappingCache::new()),
+            config,
+            sbpf_version,
+            cow_cb,
+        };
+        result.construct_eytzinger_order(&mut regions, 0, 0);
+        Ok(result)
+    }
+
+    /// Creates a new UnalignedMemoryMapping structure from the given regions
+    pub fn new(
+        regions: Vec<MemoryRegion>,
+        config: &'a Config,
+        sbpf_version: &'a SBPFVersion,
+    ) -> Result<Self, EbpfError> {
+        Self::new_internal(regions, None, config, sbpf_version)
+    }
+
+    /// Creates a new UnalignedMemoryMapping from the given regions.
+    ///
+    /// `cow_cb` is used to copy CoW regions on the first write access.
+    pub fn new_with_cow(
+        regions: Vec<MemoryRegion>,
+        cow_cb: MemoryCowCallback,
+        config: &'a Config,
+        sbpf_version: &'a SBPFVersion,
+    ) -> Result<Self, EbpfError> {
+        Self::new_internal(regions, Some(cow_cb), config, sbpf_version)
+    }
+
+    #[allow(clippy::arithmetic_side_effects)]
+    fn find_region(&self, cache: &mut MappingCache, vm_addr: u64) -> Option<&MemoryRegion> {
+        if let Some(index) = cache.find(vm_addr) {
+            // Safety:
+            // Cached index, we validated it before caching it. See the corresponding safety section
+            // in the miss branch.
+            Some(unsafe { self.regions.get_unchecked(index - 1) })
+        } else {
+            let mut index = 1;
+            while index <= self.region_addresses.len() {
+                // Safety:
+                // we start the search at index=1 and in the loop condition check
+                // for index <= len, so bound checks can be avoided
+                index = (index << 1)
+                    + unsafe { *self.region_addresses.get_unchecked(index - 1) <= vm_addr }
+                        as usize;
+            }
+            index >>= index.trailing_zeros() + 1;
+            if index == 0 {
+                return None;
+            }
+            // Safety:
+            // we check for index==0 above, and by construction if we get here index
+            // must be contained in region
+            let region = unsafe { self.regions.get_unchecked(index - 1) };
+            cache.insert(region.vm_addr..region.vm_addr_end, index);
+            Some(region)
+        }
+    }
+
+    /// Given a list of regions translate from virtual machine to host address
+    pub fn map(&self, access_type: AccessType, vm_addr: u64, len: u64) -> ProgramResult {
+        // Safety:
+        // &mut references to the mapping cache are only created internally from methods that do not
+        // invoke each other. UnalignedMemoryMapping is !Sync, so the cache reference below is
+        // guaranteed to be unique.
+        let cache = unsafe { &mut *self.cache.get() };
+
+        let region = match self.find_region(cache, vm_addr) {
+            Some(res) => res,
+            None => {
+                return generate_access_violation(
+                    self.config,
+                    self.sbpf_version,
+                    access_type,
+                    vm_addr,
+                    len,
+                )
+            }
+        };
+
+        if access_type == AccessType::Load || ensure_writable_region(region, &self.cow_cb) {
+            if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, len) {
+                return ProgramResult::Ok(host_addr);
+            }
+        }
+
+        generate_access_violation(self.config, self.sbpf_version, access_type, vm_addr, len)
+    }
+
+    /// Loads `size_of::<T>()` bytes from the given address.
+    ///
+    /// See [MemoryMapping::load].
+    #[inline(always)]
+    pub fn load<T: Pod + Into<u64>>(&self, mut vm_addr: u64) -> ProgramResult {
+        let mut len = mem::size_of::<T>() as u64;
+        debug_assert!(len <= mem::size_of::<u64>() as u64);
+
+        // Safety:
+        // &mut references to the mapping cache are only created internally from methods that do not
+        // invoke each other. UnalignedMemoryMapping is !Sync, so the cache reference below is
+        // guaranteed to be unique.
+        let cache = unsafe { &mut *self.cache.get() };
+
+        let mut region = match self.find_region(cache, vm_addr) {
+            Some(region) => {
+                if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, len) {
+                    // fast path
+                    return ProgramResult::Ok(unsafe {
+                        ptr::read_unaligned::<T>(host_addr as *const _).into()
+                    });
+                }
+
+                region
+            }
+            None => {
+                return generate_access_violation(
+                    self.config,
+                    self.sbpf_version,
+                    AccessType::Load,
+                    vm_addr,
+                    len,
+                )
+            }
+        };
+
+        // slow path
+        let initial_len = len;
+        let initial_vm_addr = vm_addr;
+        let mut value = 0u64;
+        let mut ptr = &mut value as *mut _ as *mut u8;
+
+        while len > 0 {
+            let load_len = len.min(region.vm_addr_end.saturating_sub(vm_addr));
+            if load_len == 0 {
+                break;
+            }
+            if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, load_len) {
+                // Safety:
+                // we debug_assert!(len <= mem::size_of::<u64>()) so we never
+                // overflow &value
+                unsafe {
+                    copy_nonoverlapping(host_addr as *const _, ptr, load_len as usize);
+                    ptr = ptr.add(load_len as usize);
+                };
+                len = len.saturating_sub(load_len);
+                if len == 0 {
+                    return ProgramResult::Ok(value);
+                }
+                vm_addr = vm_addr.saturating_add(load_len);
+                region = match self.find_region(cache, vm_addr) {
+                    Some(region) => region,
+                    None => break,
+                };
+            } else {
+                break;
+            }
+        }
+
+        generate_access_violation(
+            self.config,
+            self.sbpf_version,
+            AccessType::Load,
+            initial_vm_addr,
+            initial_len,
+        )
+    }
+
+    /// Store `value` at the given address.
+    ///
+    /// See [MemoryMapping::store].
+    #[inline]
+    pub fn store<T: Pod>(&self, value: T, mut vm_addr: u64) -> ProgramResult {
+        let mut len = mem::size_of::<T>() as u64;
+
+        // Safety:
+        // &mut references to the mapping cache are only created internally from methods that do not
+        // invoke each other. UnalignedMemoryMapping is !Sync, so the cache reference below is
+        // guaranteed to be unique.
+        let cache = unsafe { &mut *self.cache.get() };
+
+        let mut src = &value as *const _ as *const u8;
+
+        let mut region = match self.find_region(cache, vm_addr) {
+            Some(region) if ensure_writable_region(region, &self.cow_cb) => {
+                // fast path
+                if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, len) {
+                    // Safety:
+                    // vm_to_host() succeeded so we know there's enough space to
+                    // store `value`
+                    unsafe { ptr::write_unaligned(host_addr as *mut _, value) };
+                    return ProgramResult::Ok(host_addr);
+                }
+                region
+            }
+            _ => {
+                return generate_access_violation(
+                    self.config,
+                    self.sbpf_version,
+                    AccessType::Store,
+                    vm_addr,
+                    len,
+                )
+            }
+        };
+
+        // slow path
+        let initial_len = len;
+        let initial_vm_addr = vm_addr;
+
+        while len > 0 {
+            if !ensure_writable_region(region, &self.cow_cb) {
+                break;
+            }
+
+            let write_len = len.min(region.vm_addr_end.saturating_sub(vm_addr));
+            if write_len == 0 {
+                break;
+            }
+            if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, write_len) {
+                // Safety:
+                // vm_to_host() succeeded so we have enough space for write_len
+                unsafe { copy_nonoverlapping(src, host_addr as *mut _, write_len as usize) };
+                len = len.saturating_sub(write_len);
+                if len == 0 {
+                    return ProgramResult::Ok(host_addr);
+                }
+                src = unsafe { src.add(write_len as usize) };
+                vm_addr = vm_addr.saturating_add(write_len);
+                region = match self.find_region(cache, vm_addr) {
+                    Some(region) => region,
+                    None => break,
+                };
+            } else {
+                break;
+            }
+        }
+
+        generate_access_violation(
+            self.config,
+            self.sbpf_version,
+            AccessType::Store,
+            initial_vm_addr,
+            initial_len,
+        )
+    }
+
+    /// Returns the `MemoryRegion` corresponding to the given address.
+    pub fn region(
+        &self,
+        access_type: AccessType,
+        vm_addr: u64,
+    ) -> Result<&MemoryRegion, EbpfError> {
+        // Safety:
+        // &mut references to the mapping cache are only created internally from methods that do not
+        // invoke each other. UnalignedMemoryMapping is !Sync, so the cache reference below is
+        // guaranteed to be unique.
+        let cache = unsafe { &mut *self.cache.get() };
+        if let Some(region) = self.find_region(cache, vm_addr) {
+            if (region.vm_addr..region.vm_addr_end).contains(&vm_addr)
+                && (access_type == AccessType::Load || ensure_writable_region(region, &self.cow_cb))
+            {
+                return Ok(region);
+            }
+        }
+        Err(
+            generate_access_violation(self.config, self.sbpf_version, access_type, vm_addr, 0)
+                .unwrap_err(),
+        )
+    }
+
+    /// Returns the `MemoryRegion`s in this mapping
+    pub fn get_regions(&self) -> &[MemoryRegion] {
+        &self.regions
+    }
+
+    /// Replaces the `MemoryRegion` at the given index
+    pub fn replace_region(&mut self, index: usize, region: MemoryRegion) -> Result<(), EbpfError> {
+        if index >= self.regions.len() || self.regions[index].vm_addr != region.vm_addr {
+            return Err(EbpfError::InvalidMemoryRegion(index));
+        }
+        self.regions[index] = region;
+        self.cache.get_mut().flush();
+        Ok(())
+    }
+}
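+
+// [Illustrative sketch, not part of the original patch] The aligned mapping
+// below dispenses with searching altogether: the address bits above
+// ebpf::VIRTUAL_ADDRESS_BITS select the region directly, e.g.
+// MM_PROGRAM_START (1 << 32) is region 1 and MM_STACK_START (2 << 32) is
+// region 2.
+#[allow(dead_code)]
+fn _sketch_aligned_region_index(vm_addr: u64) -> usize {
+    vm_addr
+        .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
+        .unwrap_or(0) as usize
+}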
+
+/// Memory mapping that uses the upper half of an address to identify the
+/// underlying memory region.
+pub struct AlignedMemoryMapping<'a> {
+    /// Mapped memory regions
+    regions: Box<[MemoryRegion]>,
+    /// VM configuration
+    config: &'a Config,
+    /// Executable sbpf_version
+    sbpf_version: &'a SBPFVersion,
+    /// CoW callback
+    cow_cb: Option<MemoryCowCallback>,
+}
+
+impl<'a> fmt::Debug for AlignedMemoryMapping<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("AlignedMemoryMapping")
+            .field("regions", &self.regions)
+            .field("config", &self.config)
+            .field(
+                "cow_cb",
+                &self
+                    .cow_cb
+                    .as_ref()
+                    .map(|cb| format!("Some({:p})", &cb))
+                    .unwrap_or_else(|| "None".to_string()),
+            )
+            .finish()
+    }
+}
+
+impl<'a> AlignedMemoryMapping<'a> {
+    fn new_internal(
+        mut regions: Vec<MemoryRegion>,
+        cow_cb: Option<MemoryCowCallback>,
+        config: &'a Config,
+        sbpf_version: &'a SBPFVersion,
+    ) -> Result<Self, EbpfError> {
+        regions.insert(0, MemoryRegion::new_readonly(&[], 0));
+        regions.sort();
+        for (index, region) in regions.iter().enumerate() {
+            if region
+                .vm_addr
+                .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
+                .unwrap_or(0)
+                != index as u64
+            {
+                return Err(EbpfError::InvalidMemoryRegion(index));
+            }
+        }
+        Ok(Self {
+            regions: regions.into_boxed_slice(),
+            config,
+            sbpf_version,
+            cow_cb,
+        })
+    }
+
+    /// Creates a new MemoryMapping structure from the given regions
+    pub fn new(
+        regions: Vec<MemoryRegion>,
+        config: &'a Config,
+        sbpf_version: &'a SBPFVersion,
+    ) -> Result<Self, EbpfError> {
+        Self::new_internal(regions, None, config, sbpf_version)
+    }
+
+    /// Creates a new MemoryMapping structure from the given regions.
+    ///
+    /// `cow_cb` is used to copy CoW regions on the first write access.
+    pub fn new_with_cow(
+        regions: Vec<MemoryRegion>,
+        cow_cb: MemoryCowCallback,
+        config: &'a Config,
+        sbpf_version: &'a SBPFVersion,
+    ) -> Result<Self, EbpfError> {
+        Self::new_internal(regions, Some(cow_cb), config, sbpf_version)
+    }
+
+    /// Given a list of regions translate from virtual machine to host address
+    pub fn map(&self, access_type: AccessType, vm_addr: u64, len: u64) -> ProgramResult {
+        let index = vm_addr
+            .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
+            .unwrap_or(0) as usize;
+        if (1..self.regions.len()).contains(&index) {
+            let region = &self.regions[index];
+            if access_type == AccessType::Load || ensure_writable_region(region, &self.cow_cb) {
+                if let ProgramResult::Ok(host_addr) = region.vm_to_host(vm_addr, len) {
+                    return ProgramResult::Ok(host_addr);
+                }
+            }
+        }
+        generate_access_violation(self.config, self.sbpf_version, access_type, vm_addr, len)
+    }
+
+    /// Loads `size_of::<T>()` bytes from the given address.
+    ///
+    /// See [MemoryMapping::load].
+    #[inline]
+    pub fn load<T: Pod + Into<u64>>(&self, vm_addr: u64) -> ProgramResult {
+        let len = mem::size_of::<T>() as u64;
+        match self.map(AccessType::Load, vm_addr, len) {
+            ProgramResult::Ok(host_addr) => {
+                ProgramResult::Ok(unsafe { ptr::read_unaligned::<T>(host_addr as *const _) }.into())
+            }
+            err => err,
+        }
+    }
+
+    /// Store `value` at the given address.
+    ///
+    /// See [MemoryMapping::store].
+    #[inline]
+    pub fn store<T: Pod>(&self, value: T, vm_addr: u64) -> ProgramResult {
+        let len = mem::size_of::<T>() as u64;
+        debug_assert!(len <= mem::size_of::<u64>() as u64);
+
+        match self.map(AccessType::Store, vm_addr, len) {
+            ProgramResult::Ok(host_addr) => {
+                // Safety:
+                // map succeeded so we can write at least `len` bytes
+                unsafe {
+                    ptr::write_unaligned(host_addr as *mut T, value);
+                }
+                ProgramResult::Ok(host_addr)
+            }
+
+            err => err,
+        }
+    }
+
+    /// Returns the `MemoryRegion` corresponding to the given address.
+    pub fn region(
+        &self,
+        access_type: AccessType,
+        vm_addr: u64,
+    ) -> Result<&MemoryRegion, EbpfError> {
+        let index = vm_addr
+            .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
+            .unwrap_or(0) as usize;
+        if (1..self.regions.len()).contains(&index) {
+            let region = &self.regions[index];
+            if (region.vm_addr..region.vm_addr_end).contains(&vm_addr)
+                && (access_type == AccessType::Load || ensure_writable_region(region, &self.cow_cb))
+            {
+                return Ok(region);
+            }
+        }
+        Err(
+            generate_access_violation(self.config, self.sbpf_version, access_type, vm_addr, 0)
+                .unwrap_err(),
+        )
+    }
+
+    /// Returns the `MemoryRegion`s in this mapping
+    pub fn get_regions(&self) -> &[MemoryRegion] {
+        &self.regions
+    }
+
+    /// Replaces the `MemoryRegion` at the given index
+    pub fn replace_region(&mut self, index: usize, region: MemoryRegion) -> Result<(), EbpfError> {
+        if index >= self.regions.len() {
+            return Err(EbpfError::InvalidMemoryRegion(index));
+        }
+        let begin_index = region
+            .vm_addr
+            .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
+            .unwrap_or(0) as usize;
+        let end_index = region
+            .vm_addr
+            .saturating_add(region.len.saturating_sub(1))
+            .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32)
+            .unwrap_or(0) as usize;
+        if begin_index != index || end_index != index {
+            return Err(EbpfError::InvalidMemoryRegion(index));
+        }
+        self.regions[index] = region;
+        Ok(())
+    }
+}
+
+/// Maps virtual memory to host memory.
+#[derive(Debug)]
+pub enum MemoryMapping<'a> {
+    /// Used when address translation is disabled
+    Identity,
+    /// Aligned memory mapping which uses the upper half of an address to
+    /// identify the underlying memory region.
+    Aligned(AlignedMemoryMapping<'a>),
+    /// Memory mapping that allows mapping unaligned memory regions.
+    Unaligned(UnalignedMemoryMapping<'a>),
+}
+
+impl<'a> MemoryMapping<'a> {
+    pub(crate) fn new_identity() -> Self {
+        MemoryMapping::Identity
+    }
+
+    /// Creates a new memory mapping.
+    ///
+    /// Uses aligned or unaligned memory mapping depending on the value of
+    /// `config.aligned_memory_mapping=true`.
+    pub fn new(
+        regions: Vec<MemoryRegion>,
+        config: &'a Config,
+        sbpf_version: &'a SBPFVersion,
+    ) -> Result<Self, EbpfError> {
+        if config.aligned_memory_mapping {
+            AlignedMemoryMapping::new(regions, config, sbpf_version).map(MemoryMapping::Aligned)
+        } else {
+            UnalignedMemoryMapping::new(regions, config, sbpf_version).map(MemoryMapping::Unaligned)
+        }
+    }
+
+    /// Creates a new memory mapping.
+    ///
+    /// Uses aligned or unaligned memory mapping depending on the value of
+    /// `config.aligned_memory_mapping=true`. `cow_cb` is used to copy CoW memory regions.
+    pub fn new_with_cow(
+        regions: Vec<MemoryRegion>,
+        cow_cb: MemoryCowCallback,
+        config: &'a Config,
+        sbpf_version: &'a SBPFVersion,
+    ) -> Result<Self, EbpfError> {
+        if config.aligned_memory_mapping {
+            AlignedMemoryMapping::new_with_cow(regions, cow_cb, config, sbpf_version)
+                .map(MemoryMapping::Aligned)
+        } else {
+            UnalignedMemoryMapping::new_with_cow(regions, cow_cb, config, sbpf_version)
+                .map(MemoryMapping::Unaligned)
+        }
+    }
+
+    /// Map virtual memory to host memory.
+    pub fn map(&self, access_type: AccessType, vm_addr: u64, len: u64) -> ProgramResult {
+        match self {
+            MemoryMapping::Identity => ProgramResult::Ok(vm_addr),
+            MemoryMapping::Aligned(m) => m.map(access_type, vm_addr, len),
+            MemoryMapping::Unaligned(m) => m.map(access_type, vm_addr, len),
+        }
+    }
+
+    /// Loads `size_of::<T>()` bytes from the given address.
+    ///
+    /// Works across memory region boundaries.
+    #[inline]
+    pub fn load<T: Pod + Into<u64>>(&self, vm_addr: u64) -> ProgramResult {
+        match self {
+            MemoryMapping::Identity => unsafe {
+                ProgramResult::Ok(ptr::read_unaligned(vm_addr as *const T).into())
+            },
+            MemoryMapping::Aligned(m) => m.load::<T>(vm_addr),
+            MemoryMapping::Unaligned(m) => m.load::<T>(vm_addr),
+        }
+    }
+
+    /// Store `value` at the given address.
+    ///
+    /// Works across memory region boundaries if `len` does not fit within a single region.
+    #[inline]
+    pub fn store<T: Pod>(&self, value: T, vm_addr: u64) -> ProgramResult {
+        match self {
+            MemoryMapping::Identity => unsafe {
+                ptr::write_unaligned(vm_addr as *mut T, value);
+                ProgramResult::Ok(0)
+            },
+            MemoryMapping::Aligned(m) => m.store(value, vm_addr),
+            MemoryMapping::Unaligned(m) => m.store(value, vm_addr),
+        }
+    }
+
+    /// Returns the `MemoryRegion` corresponding to the given address.
+    pub fn region(
+        &self,
+        access_type: AccessType,
+        vm_addr: u64,
+    ) -> Result<&MemoryRegion, EbpfError> {
+        match self {
+            MemoryMapping::Identity => Err(EbpfError::InvalidMemoryRegion(0)),
+            MemoryMapping::Aligned(m) => m.region(access_type, vm_addr),
+            MemoryMapping::Unaligned(m) => m.region(access_type, vm_addr),
+        }
+    }
+
+    /// Returns the `MemoryRegion`s in this mapping.
+    pub fn get_regions(&self) -> &[MemoryRegion] {
+        match self {
+            MemoryMapping::Identity => &[],
+            MemoryMapping::Aligned(m) => m.get_regions(),
+            MemoryMapping::Unaligned(m) => m.get_regions(),
+        }
+    }
+
+    /// Replaces the `MemoryRegion` at the given index
+    pub fn replace_region(&mut self, index: usize, region: MemoryRegion) -> Result<(), EbpfError> {
+        match self {
+            MemoryMapping::Identity => Err(EbpfError::InvalidMemoryRegion(index)),
+            MemoryMapping::Aligned(m) => m.replace_region(index, region),
+            MemoryMapping::Unaligned(m) => m.replace_region(index, region),
+        }
+    }
+}
+
+// Ensure that the given region is writable.
+//
+// If the region is CoW, cow_cb is called to execute the CoW operation.
+fn ensure_writable_region(region: &MemoryRegion, cow_cb: &Option<MemoryCowCallback>) -> bool {
+    match (region.state.get(), cow_cb) {
+        (MemoryState::Writable, _) => true,
+        (MemoryState::Cow(cow_id), Some(cb)) => match cb(cow_id) {
+            Ok(host_addr) => {
+                region.host_addr.replace(host_addr);
+                region.state.replace(MemoryState::Writable);
+                true
+            }
+            Err(_) => false,
+        },
+        _ => false,
+    }
+}
+
+/// Helper for map to generate errors
+fn generate_access_violation(
+    config: &Config,
+    sbpf_version: &SBPFVersion,
+    access_type: AccessType,
+    vm_addr: u64,
+    len: u64,
+) -> ProgramResult {
+    let stack_frame = (vm_addr as i64)
+        .saturating_sub(ebpf::MM_STACK_START as i64)
+        .checked_div(config.stack_frame_size as i64)
+        .unwrap_or(0);
+    if !sbpf_version.dynamic_stack_frames()
+        && (-1..(config.max_call_depth as i64).saturating_add(1)).contains(&stack_frame)
+    {
+        ProgramResult::Err(EbpfError::StackAccessViolation(
+            access_type,
+            vm_addr,
+            len,
+            stack_frame,
+        ))
+    } else {
+        let region_name = match vm_addr & (!ebpf::MM_PROGRAM_START.saturating_sub(1)) {
+            ebpf::MM_PROGRAM_START => "program",
+            ebpf::MM_STACK_START => "stack",
+            ebpf::MM_HEAP_START => "heap",
+            ebpf::MM_INPUT_START => "input",
+            _ => "unknown",
+        };
+        ProgramResult::Err(EbpfError::AccessViolation(
+            access_type,
+            vm_addr,
+            len,
+            region_name,
+        ))
+    }
+}
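+
+// [Illustrative sketch, not part of the original patch] The region_name match
+// above keys on the faulting address with the low 32 bits cleared, so e.g.
+// MM_PROGRAM_START + 8 masks back to MM_PROGRAM_START and is reported as
+// "program":
+#[allow(dead_code)]
+#[allow(clippy::arithmetic_side_effects)]
+fn _sketch_region_name_mask() {
+    let vm_addr = ebpf::MM_PROGRAM_START + 8;
+    let masked = vm_addr & !ebpf::MM_PROGRAM_START.saturating_sub(1);
+    debug_assert_eq!(masked, ebpf::MM_PROGRAM_START);
+}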
+
+/// Fast, small linear cache used to speed up unaligned memory mapping.
+#[derive(Debug)]
+struct MappingCache {
+    // The cached entries.
+    entries: [(Range<u64>, usize); MappingCache::SIZE as usize],
+    // Index of the last accessed memory region.
+    //
+    // New entries are written backwards, so that find() can always scan
+    // forward which is faster.
+    head: isize,
+}
+
+impl MappingCache {
+    const SIZE: isize = 4;
+
+    fn new() -> MappingCache {
+        MappingCache {
+            entries: array::from_fn(|_| (0..0, 0)),
+            head: 0,
+        }
+    }
+
+    #[allow(clippy::arithmetic_side_effects)]
+    #[inline]
+    fn find(&self, vm_addr: u64) -> Option<usize> {
+        for i in 0..Self::SIZE {
+            let index = (self.head + i) % Self::SIZE;
+            // Safety:
+            // index is guaranteed to be between 0..Self::SIZE
+            let (vm_range, region_index) = unsafe { self.entries.get_unchecked(index as usize) };
+            if vm_range.contains(&vm_addr) {
+                return Some(*region_index);
+            }
+        }
+
+        None
+    }
+
+    #[allow(clippy::arithmetic_side_effects)]
+    #[inline]
+    fn insert(&mut self, vm_range: Range<u64>, region_index: usize) {
+        self.head = (self.head - 1).rem_euclid(Self::SIZE);
+        // Safety:
+        // self.head is guaranteed to be between 0..Self::SIZE
+        unsafe { *self.entries.get_unchecked_mut(self.head as usize) = (vm_range, region_index) };
+    }
+
+    #[inline]
+    fn flush(&mut self) {
+        self.entries = array::from_fn(|_| (0..0, 0));
+        self.head = 0;
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::{cell::RefCell, rc::Rc};
+    use test_utils::assert_error;
+
+    use super::*;
+
+    #[test]
+    fn test_mapping_cache() {
+        let mut cache = MappingCache::new();
+        assert_eq!(cache.find(0), None);
+
+        let mut ranges = vec![10u64..20, 20..30, 30..40, 40..50];
+        for (region, range) in ranges.iter().cloned().enumerate() {
+            cache.insert(range, region);
+        }
+        for (region, range) in ranges.iter().enumerate() {
+            if region > 0 {
+                assert_eq!(cache.find(range.start - 1), Some(region - 1));
+            } else {
+                assert_eq!(cache.find(range.start - 1), None);
+            }
+            assert_eq!(cache.find(range.start), Some(region));
+            assert_eq!(cache.find(range.start + 1), Some(region));
+            assert_eq!(cache.find(range.end - 1), Some(region));
+            if region < 3 {
+                assert_eq!(cache.find(range.end), Some(region + 1));
+            } else {
+                assert_eq!(cache.find(range.end), None);
+            }
+        }
+
+        cache.insert(50..60, 4);
+        ranges.push(50..60);
+        for (region, range) in ranges.iter().enumerate() {
+            if region == 0 {
+                assert_eq!(cache.find(range.start), None);
+                continue;
+            }
+            if region > 1 {
+                assert_eq!(cache.find(range.start - 1), Some(region - 1));
+            } else {
+                assert_eq!(cache.find(range.start - 1), None);
+            }
+            assert_eq!(cache.find(range.start), Some(region));
+            assert_eq!(cache.find(range.start + 1), Some(region));
+            assert_eq!(cache.find(range.end - 1), Some(region));
+            if region < 4 {
+                assert_eq!(cache.find(range.end), Some(region + 1));
+            } else {
+                assert_eq!(cache.find(range.end), None);
+            }
+        }
+    }
+
+    #[test]
+    fn test_mapping_cache_flush() {
+        let mut cache = MappingCache::new();
+        assert_eq!(cache.find(0), None);
+        cache.insert(0..10, 0);
+        assert_eq!(cache.find(0), Some(0));
+        cache.flush();
+        assert_eq!(cache.find(0), None);
+    }
+
+    #[test]
+    fn test_map_empty() {
+        let config = Config::default();
+        let m = UnalignedMemoryMapping::new(vec![], &config, &SBPFVersion::V2).unwrap();
+        assert_error!(
+            m.map(AccessType::Load, ebpf::MM_INPUT_START, 8),
+            "AccessViolation"
+        );
+
+        let m = AlignedMemoryMapping::new(vec![], &config, &SBPFVersion::V2).unwrap();
+        assert_error!(
+            m.map(AccessType::Load, ebpf::MM_INPUT_START, 8),
+            "AccessViolation"
+        );
+    }
+
+    #[test]
+    fn test_gapped_map() {
+        for aligned_memory_mapping in [false, true] {
+            let config = Config {
+                aligned_memory_mapping,
+                ..Config::default()
+            };
+            let mut mem1 = vec![0xff; 8];
+            let m = MemoryMapping::new(
+                vec![
+                    MemoryRegion::new_readonly(&[0; 8], ebpf::MM_PROGRAM_START),
+                    MemoryRegion::new_writable_gapped(&mut mem1, ebpf::MM_STACK_START, 2),
+                ],
+                &config,
+                &SBPFVersion::V2,
+            )
+            .unwrap();
+            for frame in 0..4 {
+                let address = ebpf::MM_STACK_START + frame * 4;
+                assert!(m.region(AccessType::Load, address).is_ok());
+                assert!(m.map(AccessType::Load, address, 2).is_ok());
+                assert_error!(m.map(AccessType::Load, address + 2, 2), "AccessViolation");
+                assert_eq!(m.load::<u16>(address).unwrap(), 0xFFFF);
+                assert_error!(m.load::<u16>(address + 2), "AccessViolation");
+                assert!(m.store::<u16>(0xFFFF, address).is_ok());
+                assert_error!(m.store::<u16>(0xFFFF, address + 2), "AccessViolation");
+            }
+        }
+    }
+
+    #[test]
+    fn test_unaligned_map_overlap() {
+        let config = Config::default();
+        let mem1 = [1, 2, 3, 4];
+        let mem2 = [5, 6];
+        assert_error!(
+            UnalignedMemoryMapping::new(
+                vec![
+                    MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START),
+                    MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64 - 1),
+                ],
+                &config,
+                &SBPFVersion::V2,
+            ),
+            "InvalidMemoryRegion(1)"
+        );
+        assert!(UnalignedMemoryMapping::new(
+            vec![
+                MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START),
+                MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
+            ],
+            &config,
+            &SBPFVersion::V2,
+        )
+        .is_ok());
+    }
+
+    #[test]
+    fn test_unaligned_map() {
+        let config = Config::default();
+        let mut mem1 = [11];
+        let mem2 = [22, 22];
+        let mem3 = [33];
+        let mem4 = [44, 44];
+        let m = UnalignedMemoryMapping::new(
+            vec![
+                MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START),
+                MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
+                MemoryRegion::new_readonly(
+                    &mem3,
+                    ebpf::MM_INPUT_START + (mem1.len() + mem2.len()) as u64,
+                ),
+                MemoryRegion::new_readonly(
+                    &mem4,
+                    ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len()) as u64,
+                ),
+            ],
+            &config,
+            &SBPFVersion::V2,
+        )
+        .unwrap();
+
+        assert_eq!(
+            m.map(AccessType::Load, ebpf::MM_INPUT_START, 1).unwrap(),
+            mem1.as_ptr() as u64
+        );
+
+        assert_eq!(
+            m.map(AccessType::Store, ebpf::MM_INPUT_START, 1).unwrap(),
+            mem1.as_ptr() as u64
+        );
+
+        assert_error!(
+            m.map(AccessType::Load, ebpf::MM_INPUT_START, 2),
+            "AccessViolation"
+        );
+
+        assert_eq!(
+            m.map(
+                AccessType::Load,
+                ebpf::MM_INPUT_START + mem1.len() as u64,
+                1,
+            )
+            .unwrap(),
+            mem2.as_ptr() as u64
+        );
+
+        assert_eq!(
+            m.map(
+                AccessType::Load,
+                ebpf::MM_INPUT_START + (mem1.len() + mem2.len()) as u64,
+                1,
+            )
+            .unwrap(),
+            mem3.as_ptr() as u64
+        );
+
+        assert_eq!(
+            m.map(
+                AccessType::Load,
+                ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len()) as u64,
+                1,
+            )
+            .unwrap(),
+            mem4.as_ptr() as u64
+        );
+
+        assert_error!(
+            m.map(
+                AccessType::Load,
+                ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len() + mem4.len()) as u64,
+                1,
+            ),
+            "AccessViolation"
+        );
+    }
+
+    #[test]
+    fn test_unaligned_region() {
+        let config = Config {
+            aligned_memory_mapping: false,
+            ..Config::default()
+        };
+
+        let mut mem1 = vec![0xFF; 4];
+        let mem2 = vec![0xDD; 4];
+        let m = MemoryMapping::new(
+            vec![
+                MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START),
+                MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + 4),
+            ],
+            &config,
+            &SBPFVersion::V2,
+        )
+        .unwrap();
+        assert_error!(
+            m.region(AccessType::Load, ebpf::MM_INPUT_START - 1),
+            "AccessViolation"
+        );
+        assert_eq!(
+            m.region(AccessType::Load, ebpf::MM_INPUT_START)
+                .unwrap()
+                .host_addr
+                .get(),
+            mem1.as_ptr() as u64
+        );
+        assert_eq!(
+            m.region(AccessType::Load, ebpf::MM_INPUT_START + 3)
+                .unwrap()
+                .host_addr
+                .get(),
+            mem1.as_ptr() as u64
+        );
+        assert_error!(
+            m.region(AccessType::Store, ebpf::MM_INPUT_START + 4),
+            "AccessViolation"
+        );
+        assert_eq!(
+            m.region(AccessType::Load, ebpf::MM_INPUT_START + 4)
+                .unwrap()
+                .host_addr
+                .get(),
+            mem2.as_ptr() as u64
+        );
+        assert_eq!(
+            m.region(AccessType::Load, ebpf::MM_INPUT_START + 7)
+                .unwrap()
+                .host_addr
+                .get(),
+            mem2.as_ptr() as u64
+        );
+        assert_error!(
+            m.region(AccessType::Load, ebpf::MM_INPUT_START + 8),
+            "AccessViolation"
+        );
+    }
+
+    #[test]
+    fn test_aligned_region() {
+        let config = Config {
+            aligned_memory_mapping: true,
+            ..Config::default()
+        };
+
+        let mut mem1 = vec![0xFF; 4];
+        let mem2 = vec![0xDD; 4];
+        let m = MemoryMapping::new(
+            vec![
+                MemoryRegion::new_writable(&mut mem1, ebpf::MM_PROGRAM_START),
+                MemoryRegion::new_readonly(&mem2, ebpf::MM_STACK_START),
+            ],
+            &config,
+            &SBPFVersion::V2,
+        )
+        .unwrap();
+        assert_error!(
+            m.region(AccessType::Load, ebpf::MM_PROGRAM_START - 1),
+            "AccessViolation"
+        );
+        assert_eq!(
+            m.region(AccessType::Load, ebpf::MM_PROGRAM_START)
+                .unwrap()
+                .host_addr
+                .get(),
+            mem1.as_ptr() as u64
+        );
+        assert_eq!(
+            m.region(AccessType::Load, ebpf::MM_PROGRAM_START + 3)
+                .unwrap()
+                .host_addr
+                .get(),
+            mem1.as_ptr() as u64
+        );
+        assert_error!(
+            m.region(AccessType::Load, ebpf::MM_PROGRAM_START + 4),
+            "AccessViolation"
+        );
+
+        assert_error!(
+            m.region(AccessType::Store, ebpf::MM_STACK_START),
+            "AccessViolation"
+        );
+        assert_eq!(
+            m.region(AccessType::Load, ebpf::MM_STACK_START)
+                .unwrap()
+                .host_addr
+                .get(),
+            mem2.as_ptr() as u64
+        );
+        assert_eq!(
+            m.region(AccessType::Load, ebpf::MM_STACK_START + 3)
+                .unwrap()
+                .host_addr
+                .get(),
+            mem2.as_ptr() as u64
+        );
+        assert_error!(
+            m.region(AccessType::Load, ebpf::MM_INPUT_START + 4),
+            "AccessViolation"
+        );
+    }
+
+    #[test]
+    fn test_unaligned_map_load() {
+        let config = Config {
+            aligned_memory_mapping: false,
+            ..Config::default()
+        };
+        let mem1 = [0x11, 0x22];
+        let mem2 = [0x33];
+        let mem3 = [0x44, 0x55, 0x66];
+        let mem4 = [0x77, 0x88, 0x99];
+        let m = MemoryMapping::new(
+            vec![
+                MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START),
+                MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
+                MemoryRegion::new_readonly(
+                    &mem3,
+                    ebpf::MM_INPUT_START + (mem1.len() + mem2.len()) as u64,
+                ),
+                MemoryRegion::new_readonly(
+                    &mem4,
+                    ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len()) as u64,
+                ),
+            ],
+            &config,
+            &SBPFVersion::V2,
+        )
+        .unwrap();
+
+        assert_eq!(m.load::<u16>(ebpf::MM_INPUT_START).unwrap(), 0x2211);
+        assert_eq!(m.load::<u32>(ebpf::MM_INPUT_START).unwrap(), 0x44332211);
+        assert_eq!(
+            m.load::<u64>(ebpf::MM_INPUT_START).unwrap(),
+            0x8877665544332211
+        );
+        assert_eq!(m.load::<u16>(ebpf::MM_INPUT_START + 1).unwrap(), 0x3322);
+        assert_eq!(m.load::<u32>(ebpf::MM_INPUT_START + 1).unwrap(), 0x55443322);
+        assert_eq!(
+            m.load::<u64>(ebpf::MM_INPUT_START + 1).unwrap(),
+            0x9988776655443322
+        );
+    }
+
+    #[test]
+    fn test_unaligned_map_store() {
+        let config = Config {
+            aligned_memory_mapping: false,
+            ..Config::default()
+        };
+        let mut mem1 = vec![0xff, 0xff];
+        let mut mem2 = vec![0xff];
+        let mut mem3 = vec![0xff, 0xff, 0xff];
+        let mut mem4 = vec![0xff, 0xff];
+        let m = MemoryMapping::new(
+            vec![
+                MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START),
+                MemoryRegion::new_writable(&mut mem2, ebpf::MM_INPUT_START + mem1.len() as u64),
+                MemoryRegion::new_writable(
+                    &mut mem3,
+                    ebpf::MM_INPUT_START + (mem1.len() + mem2.len()) as u64,
+                ),
+                MemoryRegion::new_writable(
+                    &mut mem4,
+                    ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len()) as u64,
+                ),
+            ],
+            &config,
+            &SBPFVersion::V2,
+        )
+        .unwrap();
+        m.store(0x1122u16, ebpf::MM_INPUT_START).unwrap();
+        assert_eq!(m.load::<u16>(ebpf::MM_INPUT_START).unwrap(), 0x1122);
+
+        m.store(0x33445566u32, ebpf::MM_INPUT_START).unwrap();
+        assert_eq!(m.load::<u32>(ebpf::MM_INPUT_START).unwrap(), 0x33445566);
+
+        m.store(0x778899AABBCCDDEEu64, ebpf::MM_INPUT_START)
+            .unwrap();
+        assert_eq!(
+            m.load::<u64>(ebpf::MM_INPUT_START).unwrap(),
+            0x778899AABBCCDDEE
+        );
+    }
+
+    #[test]
+    fn test_unaligned_map_load_store_fast_paths() {
+        let config = Config {
+            aligned_memory_mapping: false,
+            ..Config::default()
+        };
+        let mut mem1 = vec![0xff; 8];
+        let m = MemoryMapping::new(
+            vec![MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START)],
+            &config,
+            &SBPFVersion::V2,
+        )
+        .unwrap();
+
+        m.store(0x1122334455667788u64, ebpf::MM_INPUT_START)
+            .unwrap();
+        assert_eq!(
+            m.load::<u64>(ebpf::MM_INPUT_START).unwrap(),
+            0x1122334455667788
+        );
+        m.store(0x22334455u32, ebpf::MM_INPUT_START).unwrap();
+        assert_eq!(m.load::<u32>(ebpf::MM_INPUT_START).unwrap(), 0x22334455);
+
+        m.store(0x3344u16, ebpf::MM_INPUT_START).unwrap();
+        assert_eq!(m.load::<u16>(ebpf::MM_INPUT_START).unwrap(), 0x3344);
+
+        m.store(0x55u8, ebpf::MM_INPUT_START).unwrap();
+        assert_eq!(m.load::<u8>(ebpf::MM_INPUT_START).unwrap(), 0x55);
+    }
+
+    #[test]
+    fn test_unaligned_map_load_store_slow_paths() {
+        let config = Config {
+            aligned_memory_mapping: false,
+            ..Config::default()
+        };
+        let mut mem1 = vec![0xff; 7];
+        let mut mem2 = vec![0xff];
+        let m = MemoryMapping::new(
+            vec![
+                MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START),
+                MemoryRegion::new_writable(&mut mem2, ebpf::MM_INPUT_START + 7),
+            ],
+            &config,
+            &SBPFVersion::V2,
+        )
+        .unwrap();
+
+        m.store(0x1122334455667788u64, ebpf::MM_INPUT_START)
+            .unwrap();
+        assert_eq!(
+            m.load::<u64>(ebpf::MM_INPUT_START).unwrap(),
+            0x1122334455667788
+        );
+        m.store(0xAABBCCDDu32, ebpf::MM_INPUT_START + 4).unwrap();
+        assert_eq!(m.load::<u32>(ebpf::MM_INPUT_START + 4).unwrap(), 0xAABBCCDD);
+
+        m.store(0xEEFFu16, ebpf::MM_INPUT_START + 6).unwrap();
+        assert_eq!(m.load::<u16>(ebpf::MM_INPUT_START + 6).unwrap(), 0xEEFF);
+    }
+
+    #[test]
+    fn test_unaligned_map_store_out_of_bounds() {
+        let config = Config {
+            aligned_memory_mapping: false,
+            ..Config::default()
+        };
+
+        let mut mem1 = vec![0xFF];
+        let m = MemoryMapping::new(
+            vec![MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START)],
+            &config,
+            &SBPFVersion::V2,
+        )
+        .unwrap();
+        m.store(0x11u8, ebpf::MM_INPUT_START).unwrap();
+        assert_error!(m.store(0x11u8, ebpf::MM_INPUT_START - 1), "AccessViolation");
+        assert_error!(m.store(0x11u8, ebpf::MM_INPUT_START + 1), "AccessViolation");
+        // this gets us line coverage for the case where we're completely
+        // outside the address space (the case above is just on the edge)
+        assert_error!(m.store(0x11u8, ebpf::MM_INPUT_START + 2), "AccessViolation");
+
+        let mut mem1 = vec![0xFF; 4];
+        let mut mem2 = vec![0xDD; 4];
+        let m = MemoryMapping::new(
+            vec![
+                MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START),
+                MemoryRegion::new_writable(&mut mem2, ebpf::MM_INPUT_START + 4),
+            ],
+            &config,
+            &SBPFVersion::V2,
+        )
+        .unwrap();
+        m.store(0x1122334455667788u64, ebpf::MM_INPUT_START)
+            .unwrap();
+        assert_eq!(
+            m.load::<u64>(ebpf::MM_INPUT_START).unwrap(),
+            0x1122334455667788u64
+        );
+        assert_error!(
+            m.store(0x1122334455667788u64, ebpf::MM_INPUT_START + 1),
+            "AccessViolation"
+        );
ebpf::MM_INPUT_START + 1), + "AccessViolation" + ); + } + + #[test] + fn test_unaligned_map_load_out_of_bounds() { + let config = Config { + aligned_memory_mapping: false, + ..Config::default() + }; + + let mem1 = vec![0xff]; + let m = MemoryMapping::new( + vec![MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START)], + &config, + &SBPFVersion::V2, + ) + .unwrap(); + assert_eq!(m.load::(ebpf::MM_INPUT_START).unwrap(), 0xff); + assert_error!(m.load::(ebpf::MM_INPUT_START - 1), "AccessViolation"); + assert_error!(m.load::(ebpf::MM_INPUT_START + 1), "AccessViolation"); + assert_error!(m.load::(ebpf::MM_INPUT_START + 2), "AccessViolation"); + + let mem1 = vec![0xFF; 4]; + let mem2 = vec![0xDD; 4]; + let m = MemoryMapping::new( + vec![ + MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START), + MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + 4), + ], + &config, + &SBPFVersion::V2, + ) + .unwrap(); + assert_eq!( + m.load::(ebpf::MM_INPUT_START).unwrap(), + 0xDDDDDDDDFFFFFFFF + ); + assert_error!(m.load::(ebpf::MM_INPUT_START + 1), "AccessViolation"); + } + + #[test] + #[should_panic(expected = "AccessViolation")] + fn test_store_readonly() { + let config = Config { + aligned_memory_mapping: false, + ..Config::default() + }; + let mut mem1 = vec![0xff, 0xff]; + let mem2 = vec![0xff, 0xff]; + let m = MemoryMapping::new( + vec![ + MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START), + MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64), + ], + &config, + &SBPFVersion::V2, + ) + .unwrap(); + m.store(0x11223344, ebpf::MM_INPUT_START).unwrap(); + } + + #[test] + fn test_unaligned_map_replace_region() { + let config = Config::default(); + let mem1 = [11]; + let mem2 = [22, 22]; + let mem3 = [33]; + let mut m = UnalignedMemoryMapping::new( + vec![ + MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START), + MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64), + ], + &config, + &SBPFVersion::V2, + ) + .unwrap(); + + assert_eq!( + m.map(AccessType::Load, ebpf::MM_INPUT_START, 1).unwrap(), + mem1.as_ptr() as u64 + ); + + assert_eq!( + m.map( + AccessType::Load, + ebpf::MM_INPUT_START + mem1.len() as u64, + 1, + ) + .unwrap(), + mem2.as_ptr() as u64 + ); + + assert_error!( + m.replace_region( + 2, + MemoryRegion::new_readonly(&mem3, ebpf::MM_INPUT_START + mem1.len() as u64) + ), + "InvalidMemoryRegion(2)" + ); + + let region_index = m + .get_regions() + .iter() + .position(|mem| mem.vm_addr == ebpf::MM_INPUT_START + mem1.len() as u64) + .unwrap(); + + // old.vm_addr != new.vm_addr + assert_error!( + m.replace_region( + region_index, + MemoryRegion::new_readonly(&mem3, ebpf::MM_INPUT_START + mem1.len() as u64 + 1) + ), + "InvalidMemoryRegion({})", + region_index + ); + + m.replace_region( + region_index, + MemoryRegion::new_readonly(&mem3, ebpf::MM_INPUT_START + mem1.len() as u64), + ) + .unwrap(); + + assert_eq!( + m.map( + AccessType::Load, + ebpf::MM_INPUT_START + mem1.len() as u64, + 1, + ) + .unwrap(), + mem3.as_ptr() as u64 + ); + } + + #[test] + fn test_aligned_map_replace_region() { + let config = Config::default(); + let mem1 = [11]; + let mem2 = [22, 22]; + let mem3 = [33, 33]; + let mut m = AlignedMemoryMapping::new( + vec![ + MemoryRegion::new_readonly(&mem1, ebpf::MM_PROGRAM_START), + MemoryRegion::new_readonly(&mem2, ebpf::MM_STACK_START), + ], + &config, + &SBPFVersion::V2, + ) + .unwrap(); + + assert_eq!( + m.map(AccessType::Load, ebpf::MM_STACK_START, 1).unwrap(), + mem2.as_ptr() as u64 + ); + + // index > regions.len() + 
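+        // Editor's note (illustrative, not part of the patch): in aligned mode a
+        // region's slot in the region table is fixed by its virtual address,
+        // index == vm_addr >> ebpf::VIRTUAL_ADDRESS_BITS (32 in rbpf), so
+        // MM_STACK_START = 0x2_0000_0000 lives in slot 2. replace_region must
+        // therefore reject both an out-of-range index and any index/address
+        // mismatch, as the three cases below exercise: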
assert_error!( + m.replace_region(3, MemoryRegion::new_readonly(&mem3, ebpf::MM_STACK_START)), + "InvalidMemoryRegion(3)" + ); + + // index != addr >> VIRTUAL_ADDRESS_BITS + assert_error!( + m.replace_region(2, MemoryRegion::new_readonly(&mem3, ebpf::MM_HEAP_START)), + "InvalidMemoryRegion(2)" + ); + + // index + len != addr >> VIRTUAL_ADDRESS_BITS + assert_error!( + m.replace_region( + 2, + MemoryRegion::new_readonly(&mem3, ebpf::MM_HEAP_START - 1) + ), + "InvalidMemoryRegion(2)" + ); + + m.replace_region(2, MemoryRegion::new_readonly(&mem3, ebpf::MM_STACK_START)) + .unwrap(); + + assert_eq!( + m.map(AccessType::Load, ebpf::MM_STACK_START, 1).unwrap(), + mem3.as_ptr() as u64 + ); + } + + #[test] + fn test_cow_map() { + for aligned_memory_mapping in [true, false] { + let config = Config { + aligned_memory_mapping, + ..Config::default() + }; + let original = [11, 22]; + let copied = Rc::new(RefCell::new(Vec::new())); + + let c = Rc::clone(&copied); + let m = MemoryMapping::new_with_cow( + vec![MemoryRegion::new_cow(&original, ebpf::MM_PROGRAM_START, 42)], + Box::new(move |_| { + c.borrow_mut().extend_from_slice(&original); + Ok(c.borrow().as_slice().as_ptr() as u64) + }), + &config, + &SBPFVersion::V2, + ) + .unwrap(); + + assert_eq!( + m.map(AccessType::Load, ebpf::MM_PROGRAM_START, 1).unwrap(), + original.as_ptr() as u64 + ); + assert_eq!( + m.map(AccessType::Store, ebpf::MM_PROGRAM_START, 1).unwrap(), + copied.borrow().as_ptr() as u64 + ); + } + } + + #[test] + fn test_cow_load_store() { + for aligned_memory_mapping in [true, false] { + let config = Config { + aligned_memory_mapping, + ..Config::default() + }; + let original = [11, 22]; + let copied = Rc::new(RefCell::new(Vec::new())); + + let c = Rc::clone(&copied); + let m = MemoryMapping::new_with_cow( + vec![MemoryRegion::new_cow(&original, ebpf::MM_PROGRAM_START, 42)], + Box::new(move |_| { + c.borrow_mut().extend_from_slice(&original); + Ok(c.borrow().as_slice().as_ptr() as u64) + }), + &config, + &SBPFVersion::V2, + ) + .unwrap(); + + assert_eq!( + m.map(AccessType::Load, ebpf::MM_PROGRAM_START, 1).unwrap(), + original.as_ptr() as u64 + ); + + assert_eq!(m.load::(ebpf::MM_PROGRAM_START).unwrap(), 11); + assert_eq!(m.load::(ebpf::MM_PROGRAM_START + 1).unwrap(), 22); + assert!(copied.borrow().is_empty()); + + m.store(33u8, ebpf::MM_PROGRAM_START).unwrap(); + assert_eq!(original[0], 11); + assert_eq!(m.load::(ebpf::MM_PROGRAM_START).unwrap(), 33); + assert_eq!(m.load::(ebpf::MM_PROGRAM_START + 1).unwrap(), 22); + } + } + + #[test] + fn test_cow_region_id() { + for aligned_memory_mapping in [true, false] { + let config = Config { + aligned_memory_mapping, + ..Config::default() + }; + let original1 = [11, 22]; + let original2 = [33, 44]; + let copied = Rc::new(RefCell::new(Vec::new())); + + let c = Rc::clone(&copied); + let m = MemoryMapping::new_with_cow( + vec![ + MemoryRegion::new_cow(&original1, ebpf::MM_PROGRAM_START, 42), + MemoryRegion::new_cow(&original2, ebpf::MM_PROGRAM_START + 0x100000000, 24), + ], + Box::new(move |id| { + // check that the argument passed to MemoryRegion::new_cow is then passed to the + // callback + assert_eq!(id, 42); + c.borrow_mut().extend_from_slice(&original1); + Ok(c.borrow().as_slice().as_ptr() as u64) + }), + &config, + &SBPFVersion::V2, + ) + .unwrap(); + + m.store(55u8, ebpf::MM_PROGRAM_START).unwrap(); + assert_eq!(original1[0], 11); + assert_eq!(m.load::(ebpf::MM_PROGRAM_START).unwrap(), 55); + } + } + + #[test] + #[should_panic(expected = "AccessViolation")] + fn test_map_cow_error() { + let 
config = Config::default(); + let original = [11, 22]; + + let m = MemoryMapping::new_with_cow( + vec![MemoryRegion::new_cow(&original, ebpf::MM_PROGRAM_START, 42)], + Box::new(|_| Err(())), + &config, + &SBPFVersion::V2, + ) + .unwrap(); + + m.map(AccessType::Store, ebpf::MM_PROGRAM_START, 1).unwrap(); + } + + #[test] + #[should_panic(expected = "AccessViolation")] + fn test_store_cow_error() { + let config = Config::default(); + let original = [11, 22]; + + let m = MemoryMapping::new_with_cow( + vec![MemoryRegion::new_cow(&original, ebpf::MM_PROGRAM_START, 42)], + Box::new(|_| Err(())), + &config, + &SBPFVersion::V2, + ) + .unwrap(); + + m.store(33u8, ebpf::MM_PROGRAM_START).unwrap(); + } +} diff --git a/rbpf/src/program.rs b/rbpf/src/program.rs new file mode 100644 index 00000000000000..3e6aaef2a4079a --- /dev/null +++ b/rbpf/src/program.rs @@ -0,0 +1,379 @@ +//! Common interface for built-in and user supplied programs +use { + crate::{ + ebpf, + elf::ElfError, + vm::{Config, ContextObject, EbpfVm}, + }, + std::collections::{btree_map::Entry, BTreeMap}, +}; + +/// Defines a set of sbpf_version of an executable +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum SBPFVersion { + /// The legacy format + V1, + /// The current format + V2, + /// The future format with BTF support + V3, +} + +impl SBPFVersion { + /// Enable the little-endian byte swap instructions + pub fn enable_le(&self) -> bool { + self == &SBPFVersion::V1 + } + + /// Enable the negation instruction + pub fn enable_neg(&self) -> bool { + self == &SBPFVersion::V1 + } + + /// Swaps the reg and imm operands of the subtraction instruction + pub fn swap_sub_reg_imm_operands(&self) -> bool { + self != &SBPFVersion::V1 + } + + /// Disable the only two slots long instruction: LD_DW_IMM + pub fn disable_lddw(&self) -> bool { + self != &SBPFVersion::V1 + } + + /// Enable the BPF_PQR instruction class + pub fn enable_pqr(&self) -> bool { + self != &SBPFVersion::V1 + } + + /// Use src reg instead of imm in callx + pub fn callx_uses_src_reg(&self) -> bool { + self != &SBPFVersion::V1 + } + + /// Ensure that rodata sections don't exceed their maximum allowed size and + /// overlap with the stack + pub fn reject_rodata_stack_overlap(&self) -> bool { + self != &SBPFVersion::V1 + } + + /// Allow sh_addr != sh_offset in elf sections. Used in V2 to align + /// section vaddrs to MM_PROGRAM_START. 
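+    ///
+    /// Editor's sketch (not upstream text): loaders are expected to branch on
+    /// these version predicates, e.g.
+    ///
+    /// ```ignore
+    /// if sbpf_version.enable_elf_vaddr() {
+    ///     // accept sh_addr != sh_offset, then require section vaddrs
+    ///     // aligned to MM_PROGRAM_START
+    /// }
+    /// ```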
+ pub fn enable_elf_vaddr(&self) -> bool { + self != &SBPFVersion::V1 + } + + /// Use dynamic stack frame sizes + pub fn dynamic_stack_frames(&self) -> bool { + self != &SBPFVersion::V1 + } + + /// Support syscalls via pseudo calls (insn.src = 0) + pub fn static_syscalls(&self) -> bool { + self != &SBPFVersion::V1 + } +} + +/// Holds the function symbols of an Executable +#[derive(Debug, PartialEq, Eq)] +pub struct FunctionRegistry { + pub(crate) map: BTreeMap, T)>, +} + +impl Default for FunctionRegistry { + fn default() -> Self { + Self { + map: BTreeMap::new(), + } + } +} + +impl FunctionRegistry { + /// Register a symbol with an explicit key + pub fn register_function( + &mut self, + key: u32, + name: impl Into>, + value: T, + ) -> Result<(), ElfError> { + match self.map.entry(key) { + Entry::Vacant(entry) => { + entry.insert((name.into(), value)); + } + Entry::Occupied(entry) => { + if entry.get().1 != value { + return Err(ElfError::SymbolHashCollision(key)); + } + } + } + Ok(()) + } + + /// Register a symbol with an implicit key + pub fn register_function_hashed( + &mut self, + name: impl Into>, + value: T, + ) -> Result { + let name = name.into(); + let key = ebpf::hash_symbol_name(name.as_slice()); + self.register_function(key, name, value)?; + Ok(key) + } + + /// Used for transitioning from SBPFv1 to SBPFv2 + pub(crate) fn register_function_hashed_legacy( + &mut self, + loader: &BuiltinProgram, + hash_symbol_name: bool, + name: impl Into>, + value: T, + ) -> Result + where + usize: From, + { + let name = name.into(); + let config = loader.get_config(); + let key = if hash_symbol_name { + let hash = if name == b"entrypoint" { + ebpf::hash_symbol_name(b"entrypoint") + } else { + ebpf::hash_symbol_name(&usize::from(value).to_le_bytes()) + }; + if config.external_internal_function_hash_collision + && loader.get_function_registry().lookup_by_key(hash).is_some() + { + return Err(ElfError::SymbolHashCollision(hash)); + } + hash + } else { + usize::from(value) as u32 + }; + self.register_function( + key, + if config.enable_symbol_and_section_labels || name == b"entrypoint" { + name + } else { + Vec::default() + }, + value, + )?; + Ok(key) + } + + /// Unregister a symbol again + pub fn unregister_function(&mut self, key: u32) { + self.map.remove(&key); + } + + /// Iterate over all keys + pub fn keys(&self) -> impl Iterator + '_ { + self.map.keys().cloned() + } + + /// Iterate over all entries + pub fn iter(&self) -> impl Iterator + '_ { + self.map + .iter() + .map(|(key, (name, value))| (*key, (name.as_slice(), *value))) + } + + /// Get a function by its key + pub fn lookup_by_key(&self, key: u32) -> Option<(&[u8], T)> { + // String::from_utf8_lossy(function_name).as_str() + self.map + .get(&key) + .map(|(function_name, value)| (function_name.as_slice(), *value)) + } + + /// Get a function by its name + pub fn lookup_by_name(&self, name: &[u8]) -> Option<(&[u8], T)> { + self.map + .values() + .find(|(function_name, _value)| function_name == name) + .map(|(function_name, value)| (function_name.as_slice(), *value)) + } + + /// Calculate memory size + pub fn mem_size(&self) -> usize { + std::mem::size_of::().saturating_add(self.map.iter().fold( + 0, + |state: usize, (_, (name, value))| { + state.saturating_add( + std::mem::size_of_val(value).saturating_add( + std::mem::size_of_val(name).saturating_add(name.capacity()), + ), + ) + }, + )) + } +} + +/// Syscall function without context +pub type BuiltinFunction = fn(*mut EbpfVm, u64, u64, u64, u64, u64); + +/// Represents the interface to a 
fixed functionality program +#[derive(Eq)] +pub struct BuiltinProgram { + /// Holds the Config if this is a loader program + config: Option>, + /// Function pointers by symbol + functions: FunctionRegistry>, +} + +impl PartialEq for BuiltinProgram { + fn eq(&self, other: &Self) -> bool { + self.config.eq(&other.config) && self.functions.eq(&other.functions) + } +} + +impl BuiltinProgram { + /// Constructs a loader built-in program + pub fn new_loader(config: Config, functions: FunctionRegistry>) -> Self { + Self { + config: Some(Box::new(config)), + functions, + } + } + + /// Constructs a built-in program + pub fn new_builtin(functions: FunctionRegistry>) -> Self { + Self { + config: None, + functions, + } + } + + /// Constructs a mock loader built-in program + pub fn new_mock() -> Self { + Self { + config: Some(Box::default()), + functions: FunctionRegistry::default(), + } + } + + /// Get the configuration settings assuming this is a loader program + pub fn get_config(&self) -> &Config { + self.config.as_ref().unwrap() + } + + /// Get the function registry + pub fn get_function_registry(&self) -> &FunctionRegistry> { + &self.functions + } + + /// Calculate memory size + pub fn mem_size(&self) -> usize { + std::mem::size_of::() + .saturating_add(if self.config.is_some() { + std::mem::size_of::() + } else { + 0 + }) + .saturating_add(self.functions.mem_size()) + } +} + +impl std::fmt::Debug for BuiltinProgram { + fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { + writeln!(f, "{:?}", unsafe { + // `derive(Debug)` does not know that `C: ContextObject` does not need to implement `Debug` + std::mem::transmute::<&FunctionRegistry>, &FunctionRegistry>( + &self.functions, + ) + })?; + Ok(()) + } +} + +/// Generates an adapter for a BuiltinFunction between the Rust and the VM interface +#[macro_export] +macro_rules! declare_builtin_function { + ($(#[$attr:meta])* $name:ident $(<$($generic_ident:tt : $generic_type:tt),+>)?, fn rust( + $vm:ident : &mut $ContextObject:ty, + $arg_a:ident : u64, + $arg_b:ident : u64, + $arg_c:ident : u64, + $arg_d:ident : u64, + $arg_e:ident : u64, + $memory_mapping:ident : &mut $MemoryMapping:ty, + ) -> $Result:ty { $($rust:tt)* }) => { + $(#[$attr])* + pub struct $name {} + impl $name { + /// Rust interface + pub fn rust $(<$($generic_ident : $generic_type),+>)? ( + $vm: &mut $ContextObject, + $arg_a: u64, + $arg_b: u64, + $arg_c: u64, + $arg_d: u64, + $arg_e: u64, + $memory_mapping: &mut $MemoryMapping, + ) -> $Result { + $($rust)* + } + /// VM interface + #[allow(clippy::too_many_arguments)] + pub fn vm $(<$($generic_ident : $generic_type),+>)? 
( + $vm: *mut $crate::vm::EbpfVm<$ContextObject>, + $arg_a: u64, + $arg_b: u64, + $arg_c: u64, + $arg_d: u64, + $arg_e: u64, + ) { + use $crate::vm::ContextObject; + let vm = unsafe { + &mut *(($vm as *mut u64).offset(-($crate::vm::get_runtime_environment_key() as isize)) as *mut $crate::vm::EbpfVm<$ContextObject>) + }; + let config = vm.loader.get_config(); + if config.enable_instruction_meter { + vm.context_object_pointer.consume(vm.previous_instruction_meter - vm.due_insn_count); + } + let converted_result: $crate::error::ProgramResult = Self::rust $(::<$($generic_ident),+>)?( + vm.context_object_pointer, $arg_a, $arg_b, $arg_c, $arg_d, $arg_e, &mut vm.memory_mapping, + ).map_err(|err| $crate::error::EbpfError::SyscallError(err)).into(); + vm.program_result = converted_result; + if config.enable_instruction_meter { + vm.previous_instruction_meter = vm.context_object_pointer.get_remaining(); + } + } + } + }; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{program::BuiltinFunction, syscalls, vm::TestContextObject}; + + #[test] + fn test_builtin_program_eq() { + let mut function_registry_a = + FunctionRegistry::>::default(); + function_registry_a + .register_function_hashed(*b"log", syscalls::SyscallString::vm) + .unwrap(); + function_registry_a + .register_function_hashed(*b"log_64", syscalls::SyscallU64::vm) + .unwrap(); + let mut function_registry_b = + FunctionRegistry::>::default(); + function_registry_b + .register_function_hashed(*b"log_64", syscalls::SyscallU64::vm) + .unwrap(); + function_registry_b + .register_function_hashed(*b"log", syscalls::SyscallString::vm) + .unwrap(); + let mut function_registry_c = + FunctionRegistry::>::default(); + function_registry_c + .register_function_hashed(*b"log_64", syscalls::SyscallU64::vm) + .unwrap(); + let builtin_program_a = BuiltinProgram::new_loader(Config::default(), function_registry_a); + let builtin_program_b = BuiltinProgram::new_loader(Config::default(), function_registry_b); + assert_eq!(builtin_program_a, builtin_program_b); + let builtin_program_c = BuiltinProgram::new_loader(Config::default(), function_registry_c); + assert_ne!(builtin_program_a, builtin_program_c); + } +} diff --git a/rbpf/src/static_analysis.rs b/rbpf/src/static_analysis.rs new file mode 100644 index 00000000000000..88ef1f2d3cdc8a --- /dev/null +++ b/rbpf/src/static_analysis.rs @@ -0,0 +1,1173 @@ +#![allow(clippy::arithmetic_side_effects)] +//! Static Byte Code Analysis + +use crate::disassembler::disassemble_instruction; +use crate::{ + ebpf, + elf::Executable, + error::EbpfError, + vm::{ContextObject, DynamicAnalysis, TestContextObject}, +}; +use rustc_demangle::demangle; +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; + +/// Register state recorded after executing one instruction +/// +/// The last register is the program counter (aka pc). 
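+///
+/// Editor's sketch of the layout (as consumed by `disassemble_trace_log`
+/// below: `entry[0..11]` holds r0..=r10 and `entry[11]` the pc):
+///
+/// ```ignore
+/// fn registers_and_pc(entry: &TraceLogEntry) -> (&[u64], u64) {
+///     (&entry[0..11], entry[11])
+/// }
+/// ```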
+pub type TraceLogEntry = [u64; 12];
+
+/// Used for topological sort
+#[derive(PartialEq, Eq, Debug)]
+pub struct TopologicalIndex {
+    /// Strongly connected component ID
+    pub scc_id: usize,
+    /// Discovery order inside a strongly connected component
+    pub discovery: usize,
+}
+
+impl Default for TopologicalIndex {
+    fn default() -> Self {
+        Self {
+            scc_id: usize::MAX,
+            discovery: usize::MAX,
+        }
+    }
+}
+
+impl Ord for TopologicalIndex {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        (self.scc_id.cmp(&other.scc_id)).then(self.discovery.cmp(&other.discovery))
+    }
+}
+
+impl PartialOrd for TopologicalIndex {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+/// A node of the control-flow graph
+#[derive(Debug)]
+pub struct CfgNode {
+    /// Human readable name
+    pub label: String,
+    /// Predecessors which can jump to the start of this basic block
+    pub sources: Vec<usize>,
+    /// Successors which the end of this basic block can jump to
+    pub destinations: Vec<usize>,
+    /// Range of the instructions belonging to this basic block
+    pub instructions: std::ops::Range<usize>,
+    /// Topological index
+    pub topo_index: TopologicalIndex,
+    /// Immediate dominator (the last control flow junction)
+    pub dominator_parent: usize,
+    /// All basic blocks which can only be reached through this one
+    pub dominated_children: Vec<usize>,
+}
+
+/// An instruction or Φ node of the data-flow graph
+#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Debug)]
+pub enum DfgNode {
+    /// Points to a single instruction
+    InstructionNode(usize),
+    /// Points to a basic block which starts with a Φ node (because it has multiple CFG sources)
+    PhiNode(usize),
+}
+
+/// The register or memory location a data-flow edge guards
+#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Debug)]
+pub enum DataResource {
+    /// A BPF register
+    Register(u8),
+    /// A (potentially writeable) memory location
+    Memory,
+}
+
+/// The kind of a data-flow edge
+#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Debug)]
+pub enum DfgEdgeKind {
+    /// This kind represents data-flow edges which actually carry data
+    ///
+    /// E.g. the destination reads a resource, written by the source.
+    Filled,
+    /// This kind incurs no actual data-flow
+    ///
+    /// E.g. the destination overwrites a resource, written by the source.
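+    ///
+    /// Editor's example (illustrative): in `mov r1, 1; mov r1, 2` the second
+    /// write overwrites the first without reading it, so the edge between the
+    /// two instructions for `Register(1)` is `Empty`; a following `add r2, r1`
+    /// would instead draw a `Filled` edge from the second `mov`.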
+ Empty, +} + +/// An edge of the data-flow graph +#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Debug)] +pub struct DfgEdge { + /// The DfgNode that the destination depends on + pub source: DfgNode, + /// The DfgNode that depends on the source + pub destination: DfgNode, + /// Write-read or write-write + pub kind: DfgEdgeKind, + /// A register or memory location + pub resource: DataResource, +} + +impl Default for CfgNode { + fn default() -> Self { + Self { + label: String::new(), + sources: Vec::new(), + destinations: Vec::new(), + instructions: 0..0, + topo_index: TopologicalIndex::default(), + dominator_parent: usize::MAX, + dominated_children: Vec::new(), + } + } +} + +/// Result of the executable analysis +pub struct Analysis<'a> { + /// The program which is analyzed + executable: &'a Executable, + /// Plain list of instructions as they occur in the executable + pub instructions: Vec, + /// Functions in the executable + pub functions: BTreeMap, + /// Nodes of the control-flow graph + pub cfg_nodes: BTreeMap, + /// Topological order of cfg_nodes + pub topological_order: Vec, + /// CfgNode where the execution starts + pub entrypoint: usize, + /// Virtual CfgNode that reaches all functions + pub super_root: usize, + /// Data flow edges (the keys are DfgEdge sources) + pub dfg_forward_edges: BTreeMap>, + /// Data flow edges (the keys are DfgEdge destinations) + pub dfg_reverse_edges: BTreeMap>, +} + +impl<'a> Analysis<'a> { + /// Analyze an executable statically + pub fn from_executable( + executable: &'a Executable, + ) -> Result { + let (_program_vm_addr, program) = executable.get_text_bytes(); + let mut functions = BTreeMap::new(); + for (key, (function_name, pc)) in executable.get_function_registry().iter() { + functions.insert( + pc, + (key, String::from_utf8_lossy(function_name).to_string()), + ); + } + debug_assert!( + program.len() % ebpf::INSN_SIZE == 0, + "eBPF program length must be a multiple of {:?} octets is {:?}", + ebpf::INSN_SIZE, + program.len() + ); + let mut instructions = Vec::with_capacity(program.len() / ebpf::INSN_SIZE); + let mut insn_ptr: usize = 0; + while insn_ptr * ebpf::INSN_SIZE < program.len() { + let mut insn = ebpf::get_insn_unchecked(program, insn_ptr); + if insn.opc == ebpf::LD_DW_IMM { + insn_ptr += 1; + if insn_ptr * ebpf::INSN_SIZE >= program.len() { + break; + } + ebpf::augment_lddw_unchecked(program, &mut insn); + } + instructions.push(insn); + insn_ptr += 1; + } + let mut result = Self { + // Removes the generic ContextObject which is safe because we are not going to execute the program + executable: unsafe { std::mem::transmute(executable) }, + instructions, + functions, + cfg_nodes: BTreeMap::new(), + topological_order: Vec::new(), + entrypoint: executable.get_entrypoint_instruction_offset(), + super_root: insn_ptr, + dfg_forward_edges: BTreeMap::new(), + dfg_reverse_edges: BTreeMap::new(), + }; + result.split_into_basic_blocks(false); + result.control_flow_graph_tarjan(); + result.control_flow_graph_dominance_hierarchy(); + result.label_basic_blocks(); + let basic_block_outputs = result.intra_basic_block_data_flow(); + result.inter_basic_block_data_flow(basic_block_outputs); + Ok(result) + } + + fn link_cfg_edges(&mut self, cfg_edges: Vec<(usize, Vec)>, both_directions: bool) { + for (source, destinations) in cfg_edges { + if both_directions { + self.cfg_nodes.get_mut(&source).unwrap().destinations = destinations.clone(); + } + for destination in &destinations { + self.cfg_nodes + .get_mut(destination) + .unwrap() + .sources + 
.push(source); + } + } + } + + /// Splits the sequence of instructions into basic blocks + /// + /// Also links the control-flow graph edges between the basic blocks. + pub fn split_into_basic_blocks(&mut self, flatten_call_graph: bool) { + self.cfg_nodes.insert(0, CfgNode::default()); + for pc in self.functions.keys() { + self.cfg_nodes.entry(*pc).or_default(); + } + let mut cfg_edges = BTreeMap::new(); + for insn in self.instructions.iter() { + let target_pc = (insn.ptr as isize + insn.off as isize + 1) as usize; + match insn.opc { + ebpf::CALL_IMM => { + if let Some((function_name, _function)) = self + .executable + .get_loader() + .get_function_registry() + .lookup_by_key(insn.imm as u32) + { + if function_name == b"abort" { + self.cfg_nodes.entry(insn.ptr + 1).or_default(); + cfg_edges.insert(insn.ptr, (insn.opc, Vec::new())); + } + } else if let Some((_function_name, target_pc)) = self + .executable + .get_function_registry() + .lookup_by_key(insn.imm as u32) + { + self.cfg_nodes.entry(insn.ptr + 1).or_default(); + self.cfg_nodes.entry(target_pc).or_default(); + let destinations = if flatten_call_graph { + vec![insn.ptr + 1, target_pc] + } else { + vec![insn.ptr + 1] + }; + cfg_edges.insert(insn.ptr, (insn.opc, destinations)); + } + } + ebpf::CALL_REG => { + // Abnormal CFG edge + self.cfg_nodes.entry(insn.ptr + 1).or_default(); + let destinations = if flatten_call_graph { + vec![insn.ptr + 1, self.super_root] + } else { + vec![insn.ptr + 1] + }; + cfg_edges.insert(insn.ptr, (insn.opc, destinations)); + } + ebpf::EXIT => { + self.cfg_nodes.entry(insn.ptr + 1).or_default(); + cfg_edges.insert(insn.ptr, (insn.opc, Vec::new())); + } + ebpf::JA => { + self.cfg_nodes.entry(insn.ptr + 1).or_default(); + self.cfg_nodes.entry(target_pc).or_default(); + cfg_edges.insert(insn.ptr, (insn.opc, vec![target_pc])); + } + ebpf::JEQ_IMM + | ebpf::JGT_IMM + | ebpf::JGE_IMM + | ebpf::JLT_IMM + | ebpf::JLE_IMM + | ebpf::JSET_IMM + | ebpf::JNE_IMM + | ebpf::JSGT_IMM + | ebpf::JSGE_IMM + | ebpf::JSLT_IMM + | ebpf::JSLE_IMM + | ebpf::JEQ_REG + | ebpf::JGT_REG + | ebpf::JGE_REG + | ebpf::JLT_REG + | ebpf::JLE_REG + | ebpf::JSET_REG + | ebpf::JNE_REG + | ebpf::JSGT_REG + | ebpf::JSGE_REG + | ebpf::JSLT_REG + | ebpf::JSLE_REG => { + self.cfg_nodes.entry(insn.ptr + 1).or_default(); + self.cfg_nodes.entry(target_pc).or_default(); + cfg_edges.insert(insn.ptr, (insn.opc, vec![insn.ptr + 1, target_pc])); + } + _ => {} + } + } + { + let mut cfg_nodes = BTreeMap::new(); + std::mem::swap(&mut self.cfg_nodes, &mut cfg_nodes); + let mut cfg_nodes = cfg_nodes + .into_iter() + .filter(|(cfg_node_start, _cfg_node)| { + match self + .instructions + .binary_search_by(|insn| insn.ptr.cmp(cfg_node_start)) + { + Ok(_) => true, + Err(_index) => false, + } + }) + .collect(); + std::mem::swap(&mut self.cfg_nodes, &mut cfg_nodes); + for cfg_edge in cfg_edges.values_mut() { + cfg_edge + .1 + .retain(|destination| self.cfg_nodes.contains_key(destination)); + } + let mut functions = BTreeMap::new(); + std::mem::swap(&mut self.functions, &mut functions); + let mut functions = functions + .into_iter() + .filter(|(function_start, _)| self.cfg_nodes.contains_key(function_start)) + .collect(); + std::mem::swap(&mut self.functions, &mut functions); + } + { + let mut instruction_index = 0; + let mut cfg_node_iter = self.cfg_nodes.iter_mut().peekable(); + let mut cfg_edge_iter = cfg_edges.iter_mut().peekable(); + while let Some((cfg_node_start, cfg_node)) = cfg_node_iter.next() { + let cfg_node_end = if let Some(next_cfg_node) = 
cfg_node_iter.peek() { + *next_cfg_node.0 - 1 + } else { + self.instructions.last().unwrap().ptr + }; + cfg_node.instructions.start = instruction_index; + while instruction_index < self.instructions.len() { + if self.instructions[instruction_index].ptr <= cfg_node_end { + instruction_index += 1; + cfg_node.instructions.end = instruction_index; + } else { + break; + } + } + if let Some(next_cfg_edge) = cfg_edge_iter.peek() { + if *next_cfg_edge.0 <= cfg_node_end { + cfg_node.destinations = next_cfg_edge.1 .1.clone(); + cfg_edge_iter.next(); + continue; + } + } + if let Some(next_cfg_node) = cfg_node_iter.peek() { + if !self.functions.contains_key(cfg_node_start) { + cfg_node.destinations.push(*next_cfg_node.0); + } + } + } + } + self.link_cfg_edges( + self.cfg_nodes + .iter() + .map(|(source, cfg_node)| (*source, cfg_node.destinations.clone())) + .collect::)>>(), + false, + ); + if flatten_call_graph { + let mut destinations = Vec::new(); + let mut cfg_edges = Vec::new(); + for (source, cfg_node) in self.cfg_nodes.iter() { + if self.functions.contains_key(source) { + destinations = cfg_node + .sources + .iter() + .map(|destination| { + self.instructions + [self.cfg_nodes.get(destination).unwrap().instructions.end] + .ptr + }) + .collect(); + } + if cfg_node.destinations.is_empty() + && self.instructions[cfg_node.instructions.end - 1].opc == ebpf::EXIT + { + cfg_edges.push((*source, destinations.clone())); + } + } + self.link_cfg_edges(cfg_edges, true); + } + } + + /// Gives the basic blocks names + pub fn label_basic_blocks(&mut self) { + for (pc, cfg_node) in self.cfg_nodes.iter_mut() { + cfg_node.label = if let Some(function) = self.functions.get(pc) { + demangle(&function.1).to_string() + } else { + format!("lbb_{pc}") + }; + } + if let Some(super_root) = self.cfg_nodes.get_mut(&self.super_root) { + super_root.label = "super_root".to_string(); + } + } + + /// Generates labels for assembler code + pub fn disassemble_label( + &self, + output: &mut W, + suppress_extra_newlines: bool, + pc: usize, + last_basic_block: &mut usize, + ) -> std::io::Result<()> { + if let Some(cfg_node) = self.cfg_nodes.get(&pc) { + let is_function = self.functions.contains_key(&pc); + if is_function || cfg_node.sources != vec![*last_basic_block] { + if is_function && !suppress_extra_newlines { + writeln!(output)?; + } + writeln!(output, "{}:", cfg_node.label)?; + } + let last_insn = &self.instructions[cfg_node.instructions.end - 1]; + *last_basic_block = if last_insn.opc == ebpf::JA { + usize::MAX + } else { + pc + }; + } + Ok(()) + } + + /// Generates assembler code for a single instruction + pub fn disassemble_instruction(&self, insn: &ebpf::Insn) -> String { + disassemble_instruction( + insn, + &self.cfg_nodes, + self.executable.get_function_registry(), + self.executable.get_loader(), + self.executable.get_sbpf_version(), + ) + } + + /// Generates assembler code for the analyzed executable + pub fn disassemble(&self, output: &mut W) -> std::io::Result<()> { + let mut last_basic_block = usize::MAX; + for insn in self.instructions.iter() { + self.disassemble_label( + output, + Some(insn) == self.instructions.first(), + insn.ptr, + &mut last_basic_block, + )?; + writeln!(output, " {}", self.disassemble_instruction(insn))?; + } + Ok(()) + } + + /// Use this method to print the trace log + pub fn disassemble_trace_log( + &self, + output: &mut W, + trace_log: &[TraceLogEntry], + ) -> Result<(), std::io::Error> { + let mut pc_to_insn_index = vec![ + 0usize; + self.instructions + .last() + .map(|insn| insn.ptr + 2) + 
.unwrap_or(0) + ]; + for (index, insn) in self.instructions.iter().enumerate() { + pc_to_insn_index[insn.ptr] = index; + pc_to_insn_index[insn.ptr + 1] = index; + } + for (index, entry) in trace_log.iter().enumerate() { + let pc = entry[11] as usize; + let insn = &self.instructions[pc_to_insn_index[pc]]; + writeln!( + output, + "{:5?} {:016X?} {:5?}: {}", + index, + &entry[0..11], + pc, + self.disassemble_instruction(insn), + )?; + } + Ok(()) + } + + /// Iterates over the cfg_nodes while providing the PC range of the function they belong to. + pub fn iter_cfg_by_function( + &self, + ) -> impl Iterator, usize, &CfgNode)> + '_ { + let mut function_iter = self.functions.keys().peekable(); + let mut function_start = *function_iter.next().unwrap(); + self.cfg_nodes + .iter() + .map(move |(cfg_node_start, cfg_node)| { + if Some(&cfg_node_start) == function_iter.peek() { + function_start = *function_iter.next().unwrap(); + } + let function_end = if let Some(next_function) = function_iter.peek() { + **next_function + } else { + self.instructions.last().unwrap().ptr + 1 + }; + (function_start..function_end, *cfg_node_start, cfg_node) + }) + } + + /// Generates a graphviz DOT of the analyzed executable + pub fn visualize_graphically( + &self, + output: &mut W, + dynamic_analysis: Option<&DynamicAnalysis>, + ) -> std::io::Result<()> { + fn html_escape(string: &str) -> String { + string + .replace('&', "&") + .replace('<', "<") + .replace('>', ">") + .replace('\"', """) + } + fn emit_cfg_node( + output: &mut W, + dynamic_analysis: Option<&DynamicAnalysis>, + analysis: &Analysis, + function_range: std::ops::Range, + alias_nodes: &mut HashSet, + cfg_node_start: usize, + ) -> std::io::Result<()> { + let cfg_node = &analysis.cfg_nodes[&cfg_node_start]; + writeln!(output, " lbb_{} [label=<{}
>];", + cfg_node_start, + analysis.instructions[cfg_node.instructions.clone()].iter() + .map(|insn| { + let desc = analysis.disassemble_instruction( + insn + ); + if let Some(split_index) = desc.find(' ') { + let mut rest = desc[split_index+1..].to_string(); + if rest.len() > MAX_CELL_CONTENT_LENGTH + 1 { + rest.truncate(MAX_CELL_CONTENT_LENGTH); + rest = format!("{rest}…"); + } + format!("{}{}", html_escape(&desc[..split_index]), html_escape(&rest)) + } else { + format!("{}", html_escape(&desc)) + } + }) + .collect::>() + .join("") + )?; + if let Some(dynamic_analysis) = dynamic_analysis { + if let Some(recorded_edges) = dynamic_analysis.edges.get(&cfg_node_start) { + for destination in recorded_edges.keys() { + if !function_range.contains(destination) { + alias_nodes.insert(*destination); + } + } + } + } + for child in &cfg_node.dominated_children { + emit_cfg_node( + output, + dynamic_analysis, + analysis, + function_range.clone(), + alias_nodes, + *child, + )?; + } + Ok(()) + } + writeln!( + output, + "digraph {{ + graph [ + rankdir=LR; + concentrate=True; + style=filled; + color=lightgrey; + ]; + node [ + shape=rect; + style=filled; + fillcolor=white; + fontname=\"Courier New\"; + ]; + edge [ + fontname=\"Courier New\"; + ];" + )?; + const MAX_CELL_CONTENT_LENGTH: usize = 15; + let mut function_iter = self.functions.keys().peekable(); + while let Some(function_start) = function_iter.next() { + let function_end = if let Some(next_function) = function_iter.peek() { + **next_function + } else { + self.instructions.last().unwrap().ptr + 1 + }; + let mut alias_nodes = HashSet::new(); + writeln!(output, " subgraph cluster_{} {{", *function_start)?; + writeln!( + output, + " label={:?};", + html_escape(&self.cfg_nodes[function_start].label) + )?; + writeln!(output, " tooltip=lbb_{};", *function_start)?; + emit_cfg_node( + output, + dynamic_analysis, + self, + *function_start..function_end, + &mut alias_nodes, + *function_start, + )?; + for alias_node in alias_nodes.iter() { + writeln!( + output, + " alias_{}_lbb_{} [", + *function_start, *alias_node + )?; + writeln!(output, " label=lbb_{:?};", *alias_node)?; + writeln!(output, " tooltip=lbb_{:?};", *alias_node)?; + writeln!(output, " URL=\"#lbb_{:?}\";", *alias_node)?; + writeln!(output, " ];")?; + } + writeln!(output, " }}")?; + } + for (function_range, cfg_node_start, cfg_node) in self.iter_cfg_by_function() { + if cfg_node_start != cfg_node.dominator_parent { + writeln!( + output, + " lbb_{} -> lbb_{} [style=dotted; arrowhead=none];", + cfg_node_start, cfg_node.dominator_parent, + )?; + } + let mut edges: BTreeMap = cfg_node + .destinations + .iter() + .map(|destination| (*destination, 0)) + .collect(); + if let Some(dynamic_analysis) = dynamic_analysis { + if let Some(recorded_edges) = dynamic_analysis.edges.get(&cfg_node_start) { + for (destination, recorded_counter) in recorded_edges.iter() { + edges + .entry(*destination) + .and_modify(|counter| { + *counter = *recorded_counter; + }) + .or_insert(*recorded_counter); + } + } + } + let counter_sum: usize = edges.values().sum(); + if counter_sum == 0 && !edges.is_empty() { + writeln!( + output, + " lbb_{} -> {{{}}};", + cfg_node_start, + edges + .keys() + .map(|destination| format!("lbb_{}", *destination)) + .collect::>() + .join(" ") + )?; + } else if let Some(dynamic_analysis) = dynamic_analysis { + for (destination, counter) in edges { + write!(output, " lbb_{cfg_node_start} -> ")?; + if function_range.contains(&destination) { + write!(output, "lbb_{destination}")?; + } else { + 
write!( + output, + "alias_{0}_lbb_{1}", + function_range.start, destination + )?; + } + writeln!( + output, + " [label=\"{}\";color=\"{} 1.0 {}.0\"];", + counter, + counter as f32 / (dynamic_analysis.edge_counter_max as f32 * 3.0) + + 2.0 / 3.0, + (counter != 0) as i32, + )?; + } + } + } + writeln!(output, "}}")?; + Ok(()) + } + + /// Finds the strongly connected components + /// + /// Generates a topological order as by-product. + pub fn control_flow_graph_tarjan(&mut self) { + if self.cfg_nodes.is_empty() { + return; + } + struct NodeState { + cfg_node: usize, + discovery: usize, + lowlink: usize, + scc_id: usize, + is_on_scc_stack: bool, + } + let mut nodes = self + .cfg_nodes + .iter_mut() + .enumerate() + .map(|(v, (key, cfg_node))| { + cfg_node.topo_index.scc_id = v; + NodeState { + cfg_node: *key, + discovery: usize::MAX, + lowlink: usize::MAX, + scc_id: usize::MAX, + is_on_scc_stack: false, + } + }) + .collect::>(); + let mut scc_id = 0; + let mut scc_stack = Vec::new(); + let mut discovered = 0; + let mut next_v = 1; + let mut recursion_stack = vec![(0, 0)]; + 'dfs: while let Some((v, edge_index)) = recursion_stack.pop() { + let node = &mut nodes[v]; + if edge_index == 0 { + node.discovery = discovered; + node.lowlink = discovered; + node.is_on_scc_stack = true; + scc_stack.push(v); + discovered += 1; + } + let cfg_node = self.cfg_nodes.get(&node.cfg_node).unwrap(); + for j in edge_index..cfg_node.destinations.len() { + let w = self + .cfg_nodes + .get(&cfg_node.destinations[j]) + .unwrap() + .topo_index + .scc_id; + if nodes[w].discovery == usize::MAX { + recursion_stack.push((v, j + 1)); + recursion_stack.push((w, 0)); + continue 'dfs; + } else if nodes[w].is_on_scc_stack { + nodes[v].lowlink = nodes[v].lowlink.min(nodes[w].discovery); + } + } + if nodes[v].discovery == nodes[v].lowlink { + let mut index_in_scc = 0; + while let Some(w) = scc_stack.pop() { + let node = &mut nodes[w]; + node.is_on_scc_stack = false; + node.scc_id = scc_id; + node.discovery = index_in_scc; + index_in_scc += 1; + if w == v { + break; + } + } + scc_id += 1; + } + if let Some((w, _)) = recursion_stack.last() { + nodes[*w].lowlink = nodes[*w].lowlink.min(nodes[v].lowlink); + } else { + loop { + if next_v == nodes.len() { + break 'dfs; + } + if nodes[next_v].discovery == usize::MAX { + break; + } + next_v += 1; + } + recursion_stack.push((next_v, 0)); + next_v += 1; + } + } + for node in &nodes { + let cfg_node = self.cfg_nodes.get_mut(&node.cfg_node).unwrap(); + cfg_node.topo_index = TopologicalIndex { + scc_id: node.scc_id, + discovery: node.discovery, + }; + } + let mut topological_order = self.cfg_nodes.keys().cloned().collect::>(); + topological_order.sort_by(|a, b| { + self.cfg_nodes[b] + .topo_index + .cmp(&self.cfg_nodes[a].topo_index) + }); + self.topological_order = topological_order; + let mut super_root = CfgNode { + instructions: self.instructions.len()..self.instructions.len(), + ..CfgNode::default() + }; + let mut first_node = self.topological_order.first().cloned(); + let mut has_external_source = false; + for (index, v) in self.topological_order.iter().enumerate() { + let cfg_node = &self.cfg_nodes[v]; + has_external_source |= cfg_node.sources.iter().any(|source| { + self.cfg_nodes[source].topo_index.scc_id != cfg_node.topo_index.scc_id + }); + if self + .topological_order + .get(index + 1) + .map(|next_v| { + self.cfg_nodes[next_v].topo_index.scc_id != cfg_node.topo_index.scc_id + }) + .unwrap_or(true) + { + if !has_external_source && first_node != Some(self.super_root) { + 
super_root.destinations.push(first_node.unwrap()); + } + first_node = self.topological_order.get(index + 1).cloned(); + has_external_source = false; + } + } + for v in super_root.destinations.iter() { + let cfg_node = self.cfg_nodes.get_mut(v).unwrap(); + cfg_node.sources.push(self.super_root); + self.functions.entry(*v).or_insert_with(|| { + let name = format!("function_{}", *v); + let hash = ebpf::hash_symbol_name(name.as_bytes()); + (hash, name) + }); + } + self.cfg_nodes.insert(self.super_root, super_root); + } + + fn control_flow_graph_dominance_intersect(&self, mut a: usize, mut b: usize) -> usize { + while a != b { + match self.cfg_nodes[&a] + .topo_index + .cmp(&self.cfg_nodes[&b].topo_index) + { + std::cmp::Ordering::Greater => { + b = self.cfg_nodes[&b].dominator_parent; + } + std::cmp::Ordering::Less => { + a = self.cfg_nodes[&a].dominator_parent; + } + std::cmp::Ordering::Equal => unreachable!(), + } + } + b + } + + /// Finds the dominance hierarchy of the control-flow graph + /// + /// Uses the Cooper-Harvey-Kennedy algorithm. + pub fn control_flow_graph_dominance_hierarchy(&mut self) { + if self.cfg_nodes.is_empty() { + return; + } + self.cfg_nodes + .get_mut(&self.super_root) + .unwrap() + .dominator_parent = self.super_root; + loop { + let mut terminate = true; + for b in self.topological_order.iter() { + let cfg_node = &self.cfg_nodes[b]; + let mut dominator_parent = usize::MAX; + for p in cfg_node.sources.iter() { + if self.cfg_nodes[p].dominator_parent == usize::MAX { + continue; + } + dominator_parent = if dominator_parent == usize::MAX { + *p + } else { + self.control_flow_graph_dominance_intersect(*p, dominator_parent) + }; + } + if cfg_node.dominator_parent != dominator_parent { + let cfg_node = self.cfg_nodes.get_mut(b).unwrap(); + cfg_node.dominator_parent = dominator_parent; + terminate = false; + } + } + if terminate { + break; + } + } + for b in self.topological_order.iter() { + let cfg_node = &self.cfg_nodes[b]; + assert_ne!(cfg_node.dominator_parent, usize::MAX); + if *b == cfg_node.dominator_parent { + continue; + } + let p = cfg_node.dominator_parent; + let dominator_cfg_node = self.cfg_nodes.get_mut(&p).unwrap(); + dominator_cfg_node.dominated_children.push(*b); + } + } + + /// Connect the dependencies between the instructions inside of the basic blocks + pub fn intra_basic_block_data_flow(&mut self) -> BTreeMap> { + fn bind( + state: &mut ( + usize, + BTreeMap>, + HashMap, + ), + insn: &ebpf::Insn, + is_output: bool, + resource: DataResource, + ) { + let kind = if is_output { + DfgEdgeKind::Empty + } else { + DfgEdgeKind::Filled + }; + let source = if let Some(source) = state.2.get(&resource) { + DfgNode::InstructionNode(*source) + } else { + DfgNode::PhiNode(state.0) + }; + let destination = DfgNode::InstructionNode(insn.ptr); + state.1.entry(source.clone()).or_default().insert(DfgEdge { + source, + destination, + kind, + resource: resource.clone(), + }); + if is_output { + state.2.insert(resource, insn.ptr); + } + } + let mut state = (0, BTreeMap::new(), HashMap::new()); + let data_dependencies = self + .cfg_nodes + .iter() + .map(|(basic_block_start, basic_block)| { + state.0 = *basic_block_start; + for insn in self.instructions[basic_block.instructions.clone()].iter() { + match insn.opc { + ebpf::LD_DW_IMM => { + bind(&mut state, insn, true, DataResource::Register(insn.dst)); + } + ebpf::LD_B_REG | ebpf::LD_H_REG | ebpf::LD_W_REG | ebpf::LD_DW_REG => { + bind(&mut state, insn, false, DataResource::Memory); + bind(&mut state, insn, false, 
DataResource::Register(insn.src)); + bind(&mut state, insn, true, DataResource::Register(insn.dst)); + } + ebpf::ST_B_IMM | ebpf::ST_H_IMM | ebpf::ST_W_IMM | ebpf::ST_DW_IMM => { + bind(&mut state, insn, false, DataResource::Register(insn.dst)); + bind(&mut state, insn, true, DataResource::Memory); + } + ebpf::ST_B_REG | ebpf::ST_H_REG | ebpf::ST_W_REG | ebpf::ST_DW_REG => { + bind(&mut state, insn, false, DataResource::Register(insn.src)); + bind(&mut state, insn, false, DataResource::Register(insn.dst)); + bind(&mut state, insn, true, DataResource::Memory); + } + ebpf::ADD32_IMM + | ebpf::SUB32_IMM + | ebpf::MUL32_IMM + | ebpf::DIV32_IMM + | ebpf::SDIV32_IMM + | ebpf::OR32_IMM + | ebpf::AND32_IMM + | ebpf::LSH32_IMM + | ebpf::RSH32_IMM + | ebpf::MOD32_IMM + | ebpf::XOR32_IMM + | ebpf::ARSH32_IMM + | ebpf::ADD64_IMM + | ebpf::SUB64_IMM + | ebpf::MUL64_IMM + | ebpf::DIV64_IMM + | ebpf::SDIV64_IMM + | ebpf::OR64_IMM + | ebpf::AND64_IMM + | ebpf::LSH64_IMM + | ebpf::RSH64_IMM + | ebpf::MOD64_IMM + | ebpf::XOR64_IMM + | ebpf::ARSH64_IMM + | ebpf::HOR64_IMM + | ebpf::NEG32 + | ebpf::NEG64 + | ebpf::LE + | ebpf::BE => { + bind(&mut state, insn, false, DataResource::Register(insn.dst)); + bind(&mut state, insn, true, DataResource::Register(insn.dst)); + } + ebpf::MOV32_IMM | ebpf::MOV64_IMM => { + bind(&mut state, insn, true, DataResource::Register(insn.dst)); + } + ebpf::ADD32_REG + | ebpf::SUB32_REG + | ebpf::MUL32_REG + | ebpf::DIV32_REG + | ebpf::SDIV32_REG + | ebpf::OR32_REG + | ebpf::AND32_REG + | ebpf::LSH32_REG + | ebpf::RSH32_REG + | ebpf::MOD32_REG + | ebpf::XOR32_REG + | ebpf::ARSH32_REG + | ebpf::ADD64_REG + | ebpf::SUB64_REG + | ebpf::MUL64_REG + | ebpf::DIV64_REG + | ebpf::SDIV64_REG + | ebpf::OR64_REG + | ebpf::AND64_REG + | ebpf::LSH64_REG + | ebpf::RSH64_REG + | ebpf::MOD64_REG + | ebpf::XOR64_REG + | ebpf::ARSH64_REG => { + bind(&mut state, insn, false, DataResource::Register(insn.src)); + bind(&mut state, insn, false, DataResource::Register(insn.dst)); + bind(&mut state, insn, true, DataResource::Register(insn.dst)); + } + ebpf::MOV32_REG | ebpf::MOV64_REG => { + bind(&mut state, insn, false, DataResource::Register(insn.src)); + bind(&mut state, insn, true, DataResource::Register(insn.dst)); + } + ebpf::JEQ_IMM + | ebpf::JGT_IMM + | ebpf::JGE_IMM + | ebpf::JLT_IMM + | ebpf::JLE_IMM + | ebpf::JSET_IMM + | ebpf::JNE_IMM + | ebpf::JSGT_IMM + | ebpf::JSGE_IMM + | ebpf::JSLT_IMM + | ebpf::JSLE_IMM => { + bind(&mut state, insn, false, DataResource::Register(insn.dst)); + } + ebpf::JEQ_REG + | ebpf::JGT_REG + | ebpf::JGE_REG + | ebpf::JLT_REG + | ebpf::JLE_REG + | ebpf::JSET_REG + | ebpf::JNE_REG + | ebpf::JSGT_REG + | ebpf::JSGE_REG + | ebpf::JSLT_REG + | ebpf::JSLE_REG => { + bind(&mut state, insn, false, DataResource::Register(insn.src)); + bind(&mut state, insn, false, DataResource::Register(insn.dst)); + } + ebpf::CALL_REG | ebpf::CALL_IMM => { + if insn.opc == ebpf::CALL_REG + && !(ebpf::FIRST_SCRATCH_REG + ..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS) + .contains(&(insn.imm as usize)) + { + bind( + &mut state, + insn, + false, + DataResource::Register(insn.imm as u8), + ); + } + bind(&mut state, insn, false, DataResource::Memory); + bind(&mut state, insn, true, DataResource::Memory); + for reg in (0..ebpf::FIRST_SCRATCH_REG).chain([10].iter().cloned()) { + bind(&mut state, insn, false, DataResource::Register(reg as u8)); + bind(&mut state, insn, true, DataResource::Register(reg as u8)); + } + } + ebpf::EXIT => { + bind(&mut state, insn, false, DataResource::Memory); + for 
reg in (0..ebpf::FIRST_SCRATCH_REG).chain([10].iter().cloned()) { + bind(&mut state, insn, false, DataResource::Register(reg as u8)); + } + } + _ => {} + } + } + let mut deps = HashMap::new(); + std::mem::swap(&mut deps, &mut state.2); + (*basic_block_start, deps) + }) + .collect(); + self.dfg_forward_edges = state.1; + data_dependencies + } + + /// Connect the dependencies inbetween the basic blocks + pub fn inter_basic_block_data_flow( + &mut self, + basic_block_outputs: BTreeMap>, + ) { + let mut continue_propagation = true; + while continue_propagation { + continue_propagation = false; + for basic_block_start in self.topological_order.iter().rev() { + if !self + .dfg_forward_edges + .contains_key(&DfgNode::PhiNode(*basic_block_start)) + { + continue; + } + let basic_block = &self.cfg_nodes[basic_block_start]; + let mut edges = BTreeSet::new(); + std::mem::swap( + self.dfg_forward_edges + .get_mut(&DfgNode::PhiNode(*basic_block_start)) + .unwrap(), + &mut edges, + ); + for predecessor in basic_block.sources.iter() { + let provided_outputs = &basic_block_outputs[predecessor]; + for edge in edges.iter() { + let mut source_is_a_phi_node = false; + let source = if let Some(source) = provided_outputs.get(&edge.resource) { + DfgNode::InstructionNode(*source) + } else { + source_is_a_phi_node = true; + DfgNode::PhiNode(*predecessor) + }; + let mut edge = edge.clone(); + if basic_block.sources.len() != 1 { + edge.destination = DfgNode::PhiNode(*basic_block_start); + } + if self + .dfg_forward_edges + .entry(source.clone()) + .or_default() + .insert(edge.clone()) + && source_is_a_phi_node + && source != DfgNode::PhiNode(*basic_block_start) + { + continue_propagation = true; + } + } + } + let reflective_edges = self + .dfg_forward_edges + .get_mut(&DfgNode::PhiNode(*basic_block_start)) + .unwrap(); + for edge in reflective_edges.iter() { + if edges.insert(edge.clone()) { + continue_propagation = true; + } + } + std::mem::swap(reflective_edges, &mut edges); + } + } + for (basic_block_start, basic_block) in self.cfg_nodes.iter() { + if basic_block.sources.len() == 1 { + self.dfg_forward_edges + .remove(&DfgNode::PhiNode(*basic_block_start)); + } + } + for dfg_edges in self.dfg_forward_edges.values() { + for dfg_edge in dfg_edges.iter() { + self.dfg_reverse_edges + .entry(dfg_edge.destination.clone()) + .or_default() + .insert(dfg_edge.clone()); + } + } + } +} diff --git a/rbpf/src/syscalls.rs b/rbpf/src/syscalls.rs new file mode 100644 index 00000000000000..b21930dc3eeaef --- /dev/null +++ b/rbpf/src/syscalls.rs @@ -0,0 +1,195 @@ +#![allow(clippy::arithmetic_side_effects)] +#![allow(clippy::too_many_arguments)] +// Copyright 2015 Big Switch Networks, Inc +// (Algorithms for uBPF syscalls, originally in C) +// Copyright 2016 6WIND S.A. +// (Translation to Rust, other syscalls) +// +// Licensed under the Apache License, Version 2.0 or +// the MIT license , at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +//! This module implements some built-in syscalls that can be called from within an eBPF program. +//! +//! These syscalls may originate from several places: +//! +//! * Some of them mimic the syscalls available in the Linux kernel. +//! * Some of them were proposed as example syscalls in uBPF and they were adapted here. +//! * Other syscalls may be specific to rbpf. +//! +//! The prototype for syscalls is always the same: five `u64` as arguments, and a `u64` as a return +//! value. 
Hence some syscalls have unused arguments, or return a 0 value in all cases, in order to +//! respect this convention. + +use crate::{ + declare_builtin_function, + error::EbpfError, + memory_region::{AccessType, MemoryMapping}, + vm::TestContextObject, +}; +use std::{slice::from_raw_parts, str::from_utf8}; + +declare_builtin_function!( + /// Prints its **last three** arguments to standard output. The **first two** arguments are + /// **unused**. Returns the number of bytes written. + SyscallTracePrintf, + fn rust( + _context_object: &mut TestContextObject, + _arg1: u64, + _arg2: u64, + arg3: u64, + arg4: u64, + arg5: u64, + _memory_mapping: &mut MemoryMapping, + ) -> Result> { + println!("bpf_trace_printf: {arg3:#x}, {arg4:#x}, {arg5:#x}"); + let size_arg = |x| { + if x == 0 { + 1 + } else { + (x as f64).log(16.0).floor() as u64 + 1 + } + }; + Ok("bpf_trace_printf: 0x, 0x, 0x\n".len() as u64 + + size_arg(arg3) + + size_arg(arg4) + + size_arg(arg5)) + } +); + +declare_builtin_function!( + /// The idea is to assemble five bytes into a single `u64`. For compatibility with the syscalls API, + /// each argument must be a `u64`. + SyscallGatherBytes, + fn rust( + _context_object: &mut TestContextObject, + arg1: u64, + arg2: u64, + arg3: u64, + arg4: u64, + arg5: u64, + _memory_mapping: &mut MemoryMapping, + ) -> Result> { + Ok(arg1.wrapping_shl(32) + | arg2.wrapping_shl(24) + | arg3.wrapping_shl(16) + | arg4.wrapping_shl(8) + | arg5) + } +); + +declare_builtin_function!( + /// Same as `void *memfrob(void *s, size_t n);` in `string.h` in C. See the GNU manual page (in + /// section 3) for `memfrob`. The memory is directly modified, and the syscall returns 0 in all + /// cases. Arguments 3 to 5 are unused. + SyscallMemFrob, + fn rust( + _context_object: &mut TestContextObject, + vm_addr: u64, + len: u64, + _arg3: u64, + _arg4: u64, + _arg5: u64, + memory_mapping: &mut MemoryMapping, + ) -> Result> { + let host_addr: Result = + memory_mapping.map(AccessType::Store, vm_addr, len).into(); + let host_addr = host_addr?; + for i in 0..len { + unsafe { + let p = (host_addr + i) as *mut u8; + *p ^= 0b101010; + } + } + Ok(0) + } +); + +declare_builtin_function!( + /// C-like `strcmp`, return 0 if the strings are equal, and a non-null value otherwise. + SyscallStrCmp, + fn rust( + _context_object: &mut TestContextObject, + arg1: u64, + arg2: u64, + _arg3: u64, + _arg4: u64, + _arg5: u64, + memory_mapping: &mut MemoryMapping, + ) -> Result> { + // C-like strcmp, maybe shorter than converting the bytes to string and comparing? + if arg1 == 0 || arg2 == 0 { + return Ok(u64::MAX); + } + let a: Result = memory_mapping.map(AccessType::Load, arg1, 1).into(); + let mut a = a?; + let b: Result = memory_mapping.map(AccessType::Load, arg2, 1).into(); + let mut b = b?; + unsafe { + let mut a_val = *(a as *const u8); + let mut b_val = *(b as *const u8); + while a_val == b_val && a_val != 0 && b_val != 0 { + a += 1; + b += 1; + a_val = *(a as *const u8); + b_val = *(b as *const u8); + } + if a_val >= b_val { + Ok((a_val - b_val) as u64) + } else { + Ok((b_val - a_val) as u64) + } + } + } +); + +declare_builtin_function!( + /// Prints a NULL-terminated UTF-8 string. 
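+    ///
+    /// Editor's sketch (wiring taken from this patch's own tests in
+    /// program.rs): the syscall is exposed to programs by registering its `vm`
+    /// adapter under a symbol name:
+    ///
+    /// ```ignore
+    /// let mut registry = FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
+    /// registry.register_function_hashed(*b"log", SyscallString::vm)?;
+    /// ```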
+    SyscallString,
+    fn rust(
+        _context_object: &mut TestContextObject,
+        vm_addr: u64,
+        len: u64,
+        _arg3: u64,
+        _arg4: u64,
+        _arg5: u64,
+        memory_mapping: &mut MemoryMapping,
+    ) -> Result<u64, Box<dyn std::error::Error>> {
+        let host_addr: Result<u64, EbpfError> =
+            memory_mapping.map(AccessType::Load, vm_addr, len).into();
+        let host_addr = host_addr?;
+        let c_buf: *const i8 = host_addr as *const i8;
+        unsafe {
+            for i in 0..len {
+                let c = std::ptr::read(c_buf.offset(i as isize));
+                if c == 0 {
+                    break;
+                }
+            }
+            let message = from_utf8(from_raw_parts(host_addr as *const u8, len as usize))
+                .unwrap_or("Invalid UTF-8 String");
+            println!("log: {message}");
+        }
+        Ok(0)
+    }
+);
+
+declare_builtin_function!(
+    /// Prints the five arguments formatted as u64 in decimal.
+    SyscallU64,
+    fn rust(
+        _context_object: &mut TestContextObject,
+        arg1: u64,
+        arg2: u64,
+        arg3: u64,
+        arg4: u64,
+        arg5: u64,
+        memory_mapping: &mut MemoryMapping,
+    ) -> Result<u64, Box<dyn std::error::Error>> {
+        println!(
+            "dump_64: {:#x}, {:#x}, {:#x}, {:#x}, {:#x}, {:?}",
+            arg1, arg2, arg3, arg4, arg5, memory_mapping as *const _
+        );
+        Ok(0)
+    }
+);
diff --git a/rbpf/src/verifier.rs b/rbpf/src/verifier.rs
new file mode 100644
index 00000000000000..63cf11297555b7
--- /dev/null
+++ b/rbpf/src/verifier.rs
@@ -0,0 +1,396 @@
+#![allow(clippy::arithmetic_side_effects)]
+// Derived from uBPF <https://github.com/iovisor/ubpf>
+// Copyright 2015 Big Switch Networks, Inc
+//      (uBPF: safety checks, originally in C)
+// Copyright 2016 6WIND S.A.
+//      (Translation to Rust)
+// Copyright 2020 Solana Maintainers <maintainers@solana.com>
+//
+// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
+// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! This “verifier” performs simple checks when the eBPF program is loaded into the VM (before it is
+//! interpreted or JIT-compiled). It has nothing to do with the much more elaborate verifier inside the
+//! Linux kernel. There is no verification regarding program flow control (it should be a directed
+//! acyclic graph) or the consistency of register usage (the verifier of the kernel assigns types
+//! to the registers and is much stricter).
+//!
+//! On the other hand, rbpf is not expected to run in kernel space.
+//!
+//! Improving the verifier would be nice, but this is not trivial (and the Linux kernel is under the
+//! GPL license, so we cannot copy it).
+//!
+//! Contrary to the verifier of the Linux kernel, this one does not modify the bytecode at all.
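+//!
+//! Editor's sketch of the entry point (crate path assumed; the types are
+//! declared below in this file):
+//!
+//! ```ignore
+//! use solana_rbpf::{program::{FunctionRegistry, SBPFVersion}, verifier::{RequisiteVerifier, Verifier}, vm::Config};
+//!
+//! let function_registry = FunctionRegistry::<usize>::default();
+//! RequisiteVerifier::verify(prog, &Config::default(), &SBPFVersion::V2, &function_registry)?;
+//! ```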
+
+use crate::{
+    ebpf,
+    program::{FunctionRegistry, SBPFVersion},
+    vm::Config,
+};
+use thiserror::Error;
+
+/// Error definitions
+#[derive(Debug, Error, Eq, PartialEq)]
+pub enum VerifierError {
+    /// ProgramLengthNotMultiple
+    #[error("program length must be a multiple of {} octets", ebpf::INSN_SIZE)]
+    ProgramLengthNotMultiple,
+    /// Deprecated
+    #[error("Deprecated")]
+    ProgramTooLarge(usize),
+    /// NoProgram
+    #[error("no program set, call prog_set() to load one")]
+    NoProgram,
+    /// Division by zero
+    #[error("division by 0 (insn #{0})")]
+    DivisionByZero(usize),
+    /// UnsupportedLEBEArgument
+    #[error("unsupported argument for LE/BE (insn #{0})")]
+    UnsupportedLEBEArgument(usize),
+    /// LDDWCannotBeLast
+    #[error("LD_DW instruction cannot be last in program")]
+    LDDWCannotBeLast,
+    /// IncompleteLDDW
+    #[error("incomplete LD_DW instruction (insn #{0})")]
+    IncompleteLDDW(usize),
+    /// InfiniteLoop
+    #[error("infinite loop (insn #{0})")]
+    InfiniteLoop(usize),
+    /// JumpOutOfCode
+    #[error("jump out of code to #{0} (insn #{1})")]
+    JumpOutOfCode(usize, usize),
+    /// JumpToMiddleOfLDDW
+    #[error("jump to middle of LD_DW at #{0} (insn #{1})")]
+    JumpToMiddleOfLDDW(usize, usize),
+    /// InvalidSourceRegister
+    #[error("invalid source register (insn #{0})")]
+    InvalidSourceRegister(usize),
+    /// CannotWriteR10
+    #[error("cannot write into register r10 (insn #{0})")]
+    CannotWriteR10(usize),
+    /// InvalidDestinationRegister
+    #[error("invalid destination register (insn #{0})")]
+    InvalidDestinationRegister(usize),
+    /// UnknownOpCode
+    #[error("unknown eBPF opcode {0:#2x} (insn #{1:?})")]
+    UnknownOpCode(u8, usize),
+    /// Shift with overflow
+    #[error("Shift with overflow of {1}-bit value by {0} (insn #{2:?})")]
+    ShiftWithOverflow(u64, u64, usize),
+    /// Invalid register specified
+    #[error("Invalid register specified at instruction {0}")]
+    InvalidRegister(usize),
+    /// Invalid function
+    #[error("Invalid function at instruction {0}")]
+    InvalidFunction(usize),
+}
+
+/// eBPF Verifier
+pub trait Verifier {
+    /// eBPF verification function that returns an error if the program does not meet its requirements.
+    ///
+    /// Some examples of things the verifier may reject the program for:
+    ///
+    /// - Program does not terminate.
+    /// - Unknown instructions.
+    /// - Badly formed instruction.
+    /// - Unknown eBPF syscall index.
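+    ///
+    /// Editor's example (illustrative, matching the error variants above): a
+    /// trailing `LD_DW_IMM` with no second instruction slot fails with
+    /// `LDDWCannotBeLast`, and a jump whose target falls outside the program
+    /// fails with `JumpOutOfCode`.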
+    fn verify(
+        prog: &[u8],
+        config: &Config,
+        sbpf_version: &SBPFVersion,
+        function_registry: &FunctionRegistry<usize>,
+    ) -> Result<(), VerifierError>;
+}
+
+fn check_prog_len(prog: &[u8]) -> Result<(), VerifierError> {
+    if prog.len() % ebpf::INSN_SIZE != 0 {
+        return Err(VerifierError::ProgramLengthNotMultiple);
+    }
+    if prog.is_empty() {
+        return Err(VerifierError::NoProgram);
+    }
+    Ok(())
+}
+
+fn check_imm_nonzero(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), VerifierError> {
+    if insn.imm == 0 {
+        return Err(VerifierError::DivisionByZero(insn_ptr));
+    }
+    Ok(())
+}
+
+fn check_imm_endian(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), VerifierError> {
+    match insn.imm {
+        16 | 32 | 64 => Ok(()),
+        _ => Err(VerifierError::UnsupportedLEBEArgument(insn_ptr)),
+    }
+}
+
+fn check_load_dw(prog: &[u8], insn_ptr: usize) -> Result<(), VerifierError> {
+    if (insn_ptr + 1) * ebpf::INSN_SIZE >= prog.len() {
+        // Last instruction cannot be LD_DW because there would be no 2nd DW
+        return Err(VerifierError::LDDWCannotBeLast);
+    }
+    let next_insn = ebpf::get_insn(prog, insn_ptr + 1);
+    if next_insn.opc != 0 {
+        return Err(VerifierError::IncompleteLDDW(insn_ptr));
+    }
+    Ok(())
+}
+
+fn check_jmp_offset(
+    prog: &[u8],
+    insn_ptr: usize,
+    function_range: &std::ops::Range<usize>,
+) -> Result<(), VerifierError> {
+    let insn = ebpf::get_insn(prog, insn_ptr);
+
+    let dst_insn_ptr = insn_ptr as isize + 1 + insn.off as isize;
+    if dst_insn_ptr < 0 || !function_range.contains(&(dst_insn_ptr as usize)) {
+        return Err(VerifierError::JumpOutOfCode(
+            dst_insn_ptr as usize,
+            insn_ptr,
+        ));
+    }
+    let dst_insn = ebpf::get_insn(prog, dst_insn_ptr as usize);
+    if dst_insn.opc == 0 {
+        return Err(VerifierError::JumpToMiddleOfLDDW(
+            dst_insn_ptr as usize,
+            insn_ptr,
+        ));
+    }
+    Ok(())
+}
+
+fn check_registers(
+    insn: &ebpf::Insn,
+    store: bool,
+    insn_ptr: usize,
+    sbpf_version: &SBPFVersion,
+) -> Result<(), VerifierError> {
+    if insn.src > 10 {
+        return Err(VerifierError::InvalidSourceRegister(insn_ptr));
+    }
+
+    match (insn.dst, store) {
+        (0..=9, _) | (10, true) => Ok(()),
+        (11, _) if sbpf_version.dynamic_stack_frames() && insn.opc == ebpf::ADD64_IMM => Ok(()),
+        (10, false) => Err(VerifierError::CannotWriteR10(insn_ptr)),
+        (_, _) => Err(VerifierError::InvalidDestinationRegister(insn_ptr)),
+    }
+}
+
+/// Check that the imm is a valid shift operand
+fn check_imm_shift(insn: &ebpf::Insn, insn_ptr: usize, imm_bits: u64) -> Result<(), VerifierError> {
+    let shift_by = insn.imm as u64;
+    if insn.imm < 0 || shift_by >= imm_bits {
+        return Err(VerifierError::ShiftWithOverflow(
+            shift_by, imm_bits, insn_ptr,
+        ));
+    }
+    Ok(())
+}
+
+/// Check that callx has a valid register number
+fn check_callx_register(
+    insn: &ebpf::Insn,
+    insn_ptr: usize,
+    config: &Config,
+    sbpf_version: &SBPFVersion,
+) -> Result<(), VerifierError> {
+    let reg = if sbpf_version.callx_uses_src_reg() {
+        insn.src as i64
+    } else {
+        insn.imm
+    };
+    if !(0..=10).contains(&reg) || (reg == 10 && config.reject_callx_r10) {
+        return Err(VerifierError::InvalidRegister(insn_ptr));
+    }
+    Ok(())
+}
+
+/// Mandatory verifier for solana programs to run on-chain
+#[derive(Debug)]
+pub struct RequisiteVerifier {}
+impl Verifier for RequisiteVerifier {
+    /// Check the program against the verifier's rules
+    #[rustfmt::skip]
+    fn verify(prog: &[u8], config: &Config, sbpf_version: &SBPFVersion, function_registry: &FunctionRegistry<usize>) -> Result<(), VerifierError> {
+        check_prog_len(prog)?;
+
+        let program_range = 0..prog.len() / ebpf::INSN_SIZE;
+        let mut
function_iter = function_registry.keys().map(|insn_ptr| insn_ptr as usize).peekable(); + let mut function_range = program_range.start..program_range.end; + let mut insn_ptr: usize = 0; + while (insn_ptr + 1) * ebpf::INSN_SIZE <= prog.len() { + let insn = ebpf::get_insn(prog, insn_ptr); + let mut store = false; + + if sbpf_version.static_syscalls() && function_iter.peek() == Some(&insn_ptr) { + function_range.start = function_iter.next().unwrap_or(0); + function_range.end = *function_iter.peek().unwrap_or(&program_range.end); + let insn = ebpf::get_insn(prog, function_range.end.saturating_sub(1)); + match insn.opc { + ebpf::JA | ebpf::EXIT => {}, + _ => return Err(VerifierError::InvalidFunction( + function_range.end.saturating_sub(1), + )), + } + } + + match insn.opc { + ebpf::LD_DW_IMM if !sbpf_version.disable_lddw() => { + check_load_dw(prog, insn_ptr)?; + insn_ptr += 1; + }, + + // BPF_LDX class + ebpf::LD_B_REG => {}, + ebpf::LD_H_REG => {}, + ebpf::LD_W_REG => {}, + ebpf::LD_DW_REG => {}, + + // BPF_ST class + ebpf::ST_B_IMM => store = true, + ebpf::ST_H_IMM => store = true, + ebpf::ST_W_IMM => store = true, + ebpf::ST_DW_IMM => store = true, + + // BPF_STX class + ebpf::ST_B_REG => store = true, + ebpf::ST_H_REG => store = true, + ebpf::ST_W_REG => store = true, + ebpf::ST_DW_REG => store = true, + + // BPF_ALU class + ebpf::ADD32_IMM => {}, + ebpf::ADD32_REG => {}, + ebpf::SUB32_IMM => {}, + ebpf::SUB32_REG => {}, + ebpf::MUL32_IMM if !sbpf_version.enable_pqr() => {}, + ebpf::MUL32_REG if !sbpf_version.enable_pqr() => {}, + ebpf::DIV32_IMM if !sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, + ebpf::DIV32_REG if !sbpf_version.enable_pqr() => {}, + ebpf::OR32_IMM => {}, + ebpf::OR32_REG => {}, + ebpf::AND32_IMM => {}, + ebpf::AND32_REG => {}, + ebpf::LSH32_IMM => { check_imm_shift(&insn, insn_ptr, 32)?; }, + ebpf::LSH32_REG => {}, + ebpf::RSH32_IMM => { check_imm_shift(&insn, insn_ptr, 32)?; }, + ebpf::RSH32_REG => {}, + ebpf::NEG32 if sbpf_version.enable_neg() => {}, + ebpf::MOD32_IMM if !sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, + ebpf::MOD32_REG if !sbpf_version.enable_pqr() => {}, + ebpf::XOR32_IMM => {}, + ebpf::XOR32_REG => {}, + ebpf::MOV32_IMM => {}, + ebpf::MOV32_REG => {}, + ebpf::ARSH32_IMM => { check_imm_shift(&insn, insn_ptr, 32)?; }, + ebpf::ARSH32_REG => {}, + ebpf::LE if sbpf_version.enable_le() => { check_imm_endian(&insn, insn_ptr)?; }, + ebpf::BE => { check_imm_endian(&insn, insn_ptr)?; }, + + // BPF_ALU64 class + ebpf::ADD64_IMM => {}, + ebpf::ADD64_REG => {}, + ebpf::SUB64_IMM => {}, + ebpf::SUB64_REG => {}, + ebpf::MUL64_IMM if !sbpf_version.enable_pqr() => {}, + ebpf::MUL64_REG if !sbpf_version.enable_pqr() => {}, + ebpf::DIV64_IMM if !sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, + ebpf::DIV64_REG if !sbpf_version.enable_pqr() => {}, + ebpf::OR64_IMM => {}, + ebpf::OR64_REG => {}, + ebpf::AND64_IMM => {}, + ebpf::AND64_REG => {}, + ebpf::LSH64_IMM => { check_imm_shift(&insn, insn_ptr, 64)?; }, + ebpf::LSH64_REG => {}, + ebpf::RSH64_IMM => { check_imm_shift(&insn, insn_ptr, 64)?; }, + ebpf::RSH64_REG => {}, + ebpf::NEG64 if sbpf_version.enable_neg() => {}, + ebpf::MOD64_IMM if !sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, + ebpf::MOD64_REG if !sbpf_version.enable_pqr() => {}, + ebpf::XOR64_IMM => {}, + ebpf::XOR64_REG => {}, + ebpf::MOV64_IMM => {}, + ebpf::MOV64_REG => {}, + ebpf::ARSH64_IMM => { check_imm_shift(&insn, insn_ptr, 64)?; }, + ebpf::ARSH64_REG => 
{}, + ebpf::HOR64_IMM if sbpf_version.disable_lddw() => {}, + + // BPF_PQR class + ebpf::LMUL32_IMM if sbpf_version.enable_pqr() => {}, + ebpf::LMUL32_REG if sbpf_version.enable_pqr() => {}, + ebpf::LMUL64_IMM if sbpf_version.enable_pqr() => {}, + ebpf::LMUL64_REG if sbpf_version.enable_pqr() => {}, + ebpf::UHMUL64_IMM if sbpf_version.enable_pqr() => {}, + ebpf::UHMUL64_REG if sbpf_version.enable_pqr() => {}, + ebpf::SHMUL64_IMM if sbpf_version.enable_pqr() => {}, + ebpf::SHMUL64_REG if sbpf_version.enable_pqr() => {}, + ebpf::UDIV32_IMM if sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, + ebpf::UDIV32_REG if sbpf_version.enable_pqr() => {}, + ebpf::UDIV64_IMM if sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, + ebpf::UDIV64_REG if sbpf_version.enable_pqr() => {}, + ebpf::UREM32_IMM if sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, + ebpf::UREM32_REG if sbpf_version.enable_pqr() => {}, + ebpf::UREM64_IMM if sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, + ebpf::UREM64_REG if sbpf_version.enable_pqr() => {}, + ebpf::SDIV32_IMM if sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, + ebpf::SDIV32_REG if sbpf_version.enable_pqr() => {}, + ebpf::SDIV64_IMM if sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, + ebpf::SDIV64_REG if sbpf_version.enable_pqr() => {}, + ebpf::SREM32_IMM if sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, + ebpf::SREM32_REG if sbpf_version.enable_pqr() => {}, + ebpf::SREM64_IMM if sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, + ebpf::SREM64_REG if sbpf_version.enable_pqr() => {}, + + // BPF_JMP class + ebpf::JA => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JEQ_IMM => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JEQ_REG => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JGT_IMM => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JGT_REG => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JGE_IMM => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JGE_REG => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JLT_IMM => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JLT_REG => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JLE_IMM => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JLE_REG => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JSET_IMM => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JSET_REG => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JNE_IMM => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JNE_REG => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JSGT_IMM => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JSGT_REG => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JSGE_IMM => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JSGE_REG => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JSLT_IMM => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JSLT_REG => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JSLE_IMM => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::JSLE_REG => { check_jmp_offset(prog, insn_ptr, &function_range)?; }, + ebpf::CALL_IMM if sbpf_version.static_syscalls() && insn.src != 0 => 
{ check_jmp_offset(prog, insn_ptr, &program_range)?; }, + ebpf::CALL_IMM => {}, + ebpf::CALL_REG => { check_callx_register(&insn, insn_ptr, config, sbpf_version)?; }, + ebpf::EXIT => {}, + + _ => { + return Err(VerifierError::UnknownOpCode(insn.opc, insn_ptr)); + } + } + + check_registers(&insn, store, insn_ptr, sbpf_version)?; + + insn_ptr += 1; + } + + // insn_ptr should now be equal to number of instructions. + if insn_ptr != prog.len() / ebpf::INSN_SIZE { + return Err(VerifierError::JumpOutOfCode(insn_ptr, insn_ptr)); + } + + Ok(()) + } +} diff --git a/rbpf/src/vm.rs b/rbpf/src/vm.rs new file mode 100644 index 00000000000000..9f6dbd0bdf1bdb --- /dev/null +++ b/rbpf/src/vm.rs @@ -0,0 +1,439 @@ +#![allow(clippy::arithmetic_side_effects)] +// Derived from uBPF +// Copyright 2015 Big Switch Networks, Inc +// (uBPF: VM architecture, parts of the interpreter, originally in C) +// Copyright 2016 6WIND S.A. +// (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for syscalls) +// Copyright 2020 Solana Maintainers +// +// Licensed under the Apache License, Version 2.0 or +// the MIT license , at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +//! Virtual machine for eBPF programs. + +use crate::{ + ebpf, + elf::Executable, + error::{EbpfError, ProgramResult}, + interpreter::Interpreter, + memory_region::MemoryMapping, + program::{BuiltinFunction, BuiltinProgram, FunctionRegistry, SBPFVersion}, + static_analysis::{Analysis, TraceLogEntry}, +}; +use rand::Rng; +use std::{collections::BTreeMap, fmt::Debug, sync::Arc}; + +/// Shift the RUNTIME_ENVIRONMENT_KEY by this many bits to the LSB +/// +/// 3 bits for 8 Byte alignment, and 1 bit to have encoding space for the RuntimeEnvironment. 
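+/// (Concretely, the key is derived below as `rand::thread_rng().gen::<i32>() >> 4`; see
+/// `get_runtime_environment_key()`.)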
+const PROGRAM_ENVIRONMENT_KEY_SHIFT: u32 = 4;
+static RUNTIME_ENVIRONMENT_KEY: std::sync::OnceLock<i32> = std::sync::OnceLock::<i32>::new();
+
+/// Returns (and if not done before generates) the encryption key for the VM pointer
+pub fn get_runtime_environment_key() -> i32 {
+    *RUNTIME_ENVIRONMENT_KEY
+        .get_or_init(|| rand::thread_rng().gen::<i32>() >> PROGRAM_ENVIRONMENT_KEY_SHIFT)
+}
+
+/// VM configuration settings
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct Config {
+    /// Maximum call depth
+    pub max_call_depth: usize,
+    /// Size of a stack frame in bytes, must match the size specified in the LLVM BPF backend
+    pub stack_frame_size: usize,
+    /// Enables the use of MemoryMapping and MemoryRegion for address translation
+    pub enable_address_translation: bool,
+    /// Enables gaps in VM address space between the stack frames
+    pub enable_stack_frame_gaps: bool,
+    /// Maximal pc distance after which a new instruction meter validation is emitted by the JIT
+    pub instruction_meter_checkpoint_distance: usize,
+    /// Enable instruction meter and limiting
+    pub enable_instruction_meter: bool,
+    /// Enable instruction tracing
+    pub enable_instruction_tracing: bool,
+    /// Enable dynamic string allocation for labels
+    pub enable_symbol_and_section_labels: bool,
+    /// Reject ELF files containing issues that the verifier did not catch before (up to v0.2.21)
+    pub reject_broken_elfs: bool,
+    /// Ratio of native host instructions per random no-op in JIT (0 = OFF)
+    pub noop_instruction_rate: u32,
+    /// Enable disinfection of immediate values and offsets provided by the user in JIT
+    pub sanitize_user_provided_values: bool,
+    /// Throw ElfError::SymbolHashCollision when a BPF function collides with a registered syscall
+    pub external_internal_function_hash_collision: bool,
+    /// Have the verifier reject "callx r10"
+    pub reject_callx_r10: bool,
+    /// Avoid copying read only sections when possible
+    pub optimize_rodata: bool,
+    /// Use the new ELF parser
+    pub new_elf_parser: bool,
+    /// Use aligned memory mapping
+    pub aligned_memory_mapping: bool,
+    /// Allow ExecutableCapability::V1
+    pub enable_sbpf_v1: bool,
+    /// Allow ExecutableCapability::V2
+    pub enable_sbpf_v2: bool,
+}
+
+impl Config {
+    /// Returns the size of the stack memory region
+    pub fn stack_size(&self) -> usize {
+        self.stack_frame_size * self.max_call_depth
+    }
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        Self {
+            max_call_depth: 20,
+            stack_frame_size: 4_096,
+            enable_address_translation: true,
+            enable_stack_frame_gaps: true,
+            instruction_meter_checkpoint_distance: 10000,
+            enable_instruction_meter: true,
+            enable_instruction_tracing: false,
+            enable_symbol_and_section_labels: false,
+            reject_broken_elfs: false,
+            noop_instruction_rate: 256,
+            sanitize_user_provided_values: true,
+            external_internal_function_hash_collision: true,
+            reject_callx_r10: true,
+            optimize_rodata: true,
+            new_elf_parser: true,
+            aligned_memory_mapping: true,
+            enable_sbpf_v1: true,
+            enable_sbpf_v2: true,
+        }
+    }
+}
+
+/// Static constructors for Executable
+impl<C: ContextObject> Executable<C> {
+    /// Creates an executable from an ELF file
+    pub fn from_elf(elf_bytes: &[u8], loader: Arc<BuiltinProgram<C>>) -> Result<Self, EbpfError> {
+        let executable = Executable::load(elf_bytes, loader)?;
+        Ok(executable)
+    }
+    /// Creates an executable from machine code
+    pub fn from_text_bytes(
+        text_bytes: &[u8],
+        loader: Arc<BuiltinProgram<C>>,
+        sbpf_version: SBPFVersion,
+        function_registry: FunctionRegistry<usize>,
+    ) -> Result<Self, EbpfError> {
+        Executable::new_from_text_bytes(text_bytes, loader, sbpf_version, function_registry)
+            .map_err(EbpfError::ElfError)
+    }
+}
+
+/// Runtime context
+pub trait ContextObject {
+    /// Called for every instruction executed when tracing is enabled
+    fn trace(&mut self, state: [u64; 12]);
+    /// Consume instructions from meter
+    fn consume(&mut self, amount: u64);
+    /// Get the number of remaining instructions allowed
+    fn get_remaining(&self) -> u64;
+}
+
+/// Simple instruction meter for testing
+#[derive(Debug, Clone, Default)]
+pub struct TestContextObject {
+    /// Contains the register state at every instruction in order of execution
+    pub trace_log: Vec<TraceLogEntry>,
+    /// Maximum number of instructions that can still be executed
+    pub remaining: u64,
+}
+
+impl ContextObject for TestContextObject {
+    fn trace(&mut self, state: [u64; 12]) {
+        self.trace_log.push(state);
+    }
+
+    fn consume(&mut self, amount: u64) {
+        self.remaining = self.remaining.saturating_sub(amount);
+    }
+
+    fn get_remaining(&self) -> u64 {
+        self.remaining
+    }
+}
+
+impl TestContextObject {
+    /// Initialize with instruction meter
+    pub fn new(remaining: u64) -> Self {
+        Self {
+            trace_log: Vec::new(),
+            remaining,
+        }
+    }
+
+    /// Compares an interpreter trace and a JIT trace.
+    ///
+    /// The log of the JIT can be longer because it only validates the instruction meter at branches.
+    pub fn compare_trace_log(interpreter: &Self, jit: &Self) -> bool {
+        let interpreter = interpreter.trace_log.as_slice();
+        let mut jit = jit.trace_log.as_slice();
+        if jit.len() > interpreter.len() {
+            jit = &jit[0..interpreter.len()];
+        }
+        interpreter == jit
+    }
+}
+
+/// Statistics of taken branches (from a recorded trace)
+pub struct DynamicAnalysis {
+    /// Maximal edge counter value
+    pub edge_counter_max: usize,
+    /// src_node, dst_node, edge_counter
+    pub edges: BTreeMap<usize, BTreeMap<usize, usize>>,
+}
+
+impl DynamicAnalysis {
+    /// Accumulates a trace
+    pub fn new(trace_log: &[[u64; 12]], analysis: &Analysis) -> Self {
+        let mut result = Self {
+            edge_counter_max: 0,
+            edges: BTreeMap::new(),
+        };
+        let mut last_basic_block = usize::MAX;
+        for traced_instruction in trace_log.iter() {
+            let pc = traced_instruction[11] as usize;
+            if analysis.cfg_nodes.contains_key(&pc) {
+                let counter = result
+                    .edges
+                    .entry(last_basic_block)
+                    .or_default()
+                    .entry(pc)
+                    .or_insert(0);
+                *counter += 1;
+                result.edge_counter_max = result.edge_counter_max.max(*counter);
+                last_basic_block = pc;
+            }
+        }
+        result
+    }
+}
+
+/// A call frame used for function calls inside the Interpreter
+#[derive(Clone, Default)]
+pub struct CallFrame {
+    /// The caller-saved registers
+    pub caller_saved_registers: [u64; ebpf::SCRATCH_REGS],
+    /// The caller's frame pointer
+    pub frame_pointer: u64,
+    /// The target_pc of the exit instruction which returns to the caller
+    pub target_pc: u64,
+}
+
+/// A virtual machine to run eBPF programs.
+///
+/// # Examples
+///
+/// ```
+/// use solana_rbpf::{
+///     aligned_memory::AlignedMemory,
+///     ebpf,
+///     elf::Executable,
+///     memory_region::{MemoryMapping, MemoryRegion},
+///     program::{BuiltinProgram, FunctionRegistry, SBPFVersion},
+///     verifier::RequisiteVerifier,
+///     vm::{Config, EbpfVm, TestContextObject},
+/// };
+///
+/// let prog = &[
+///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // exit
+/// ];
+/// let mem = &mut [
+///     0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd
+/// ];
+///
+/// let loader = std::sync::Arc::new(BuiltinProgram::new_mock());
+/// let function_registry = FunctionRegistry::default();
+/// let mut executable = Executable::<TestContextObject>::from_text_bytes(prog, loader.clone(), SBPFVersion::V2, function_registry).unwrap();
+/// executable.verify::<RequisiteVerifier>().unwrap();
+/// let mut context_object = TestContextObject::new(1);
+/// let sbpf_version = executable.get_sbpf_version();
+///
+/// let mut stack = AlignedMemory::<{ebpf::HOST_ALIGN}>::zero_filled(executable.get_config().stack_size());
+/// let stack_len = stack.len();
+/// let mut heap = AlignedMemory::<{ebpf::HOST_ALIGN}>::with_capacity(0);
+///
+/// let regions: Vec<MemoryRegion> = vec![
+///     executable.get_ro_region(),
+///     MemoryRegion::new_writable(
+///         stack.as_slice_mut(),
+///         ebpf::MM_STACK_START,
+///     ),
+///     MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START),
+///     MemoryRegion::new_writable(mem, ebpf::MM_INPUT_START),
+/// ];
+///
+/// let memory_mapping = MemoryMapping::new(regions, executable.get_config(), sbpf_version).unwrap();
+///
+/// let mut vm = EbpfVm::new(loader, sbpf_version, &mut context_object, memory_mapping, stack_len);
+///
+/// let (instruction_count, result) = vm.execute_program(&executable, true);
+/// assert_eq!(instruction_count, 1);
+/// assert_eq!(result.unwrap(), 0);
+/// ```
+#[repr(C)]
+pub struct EbpfVm<'a, C: ContextObject> {
+    /// Needed to exit from the guest back into the host
+    pub host_stack_pointer: *mut u64,
+    /// The current call depth.
+    ///
+    /// Incremented on calls and decremented on exits. It's used to enforce
+    /// config.max_call_depth and to know when to terminate execution.
+    pub call_depth: u64,
+    /// Guest stack pointer (r11).
+    ///
+    /// The stack pointer isn't exposed as an actual register. Only sub and add
+    /// instructions (typically generated by the LLVM backend) are allowed to
+    /// access it when sbpf_version.dynamic_stack_frames()=true. Its value is only
+    /// stored here and therefore the register is not tracked in REGISTER_MAP.
+    pub stack_pointer: u64,
+    /// Pointer to ContextObject
+    pub context_object_pointer: &'a mut C,
+    /// Last return value of instruction_meter.get_remaining()
+    pub previous_instruction_meter: u64,
+    /// Outstanding value to instruction_meter.consume()
+    pub due_insn_count: u64,
+    /// CPU cycles accumulated by the stop watch
+    pub stopwatch_numerator: u64,
+    /// Number of times the stop watch was used
+    pub stopwatch_denominator: u64,
+    /// Registers inlined
+    pub registers: [u64; 12],
+    /// ProgramResult inlined
+    pub program_result: ProgramResult,
+    /// MemoryMapping inlined
+    pub memory_mapping: MemoryMapping<'a>,
+    /// Stack of CallFrames used by the Interpreter
+    pub call_frames: Vec<CallFrame>,
+    /// Loader built-in program
+    pub loader: Arc<BuiltinProgram<C>>,
+    /// TCP port for the debugger interface
+    #[cfg(feature = "debugger")]
+    pub debug_port: Option<u16>,
+}
+
+impl<'a, C: ContextObject> EbpfVm<'a, C> {
+    /// Creates a new virtual machine instance.
+    pub fn new(
+        loader: Arc<BuiltinProgram<C>>,
+        sbpf_version: &SBPFVersion,
+        context_object: &'a mut C,
+        mut memory_mapping: MemoryMapping<'a>,
+        stack_len: usize,
+    ) -> Self {
+        let config = loader.get_config();
+        let stack_pointer =
+            ebpf::MM_STACK_START.saturating_add(if sbpf_version.dynamic_stack_frames() {
+                // the stack is fully descending, frames start as empty and change size anytime r11 is modified
+                stack_len
+            } else {
+                // within a frame the stack grows down, but frames are ascending
+                config.stack_frame_size
+            } as u64);
+        if !config.enable_address_translation {
+            memory_mapping = MemoryMapping::new_identity();
+        }
+        EbpfVm {
+            host_stack_pointer: std::ptr::null_mut(),
+            call_depth: 0,
+            stack_pointer,
+            context_object_pointer: context_object,
+            previous_instruction_meter: 0,
+            due_insn_count: 0,
+            stopwatch_numerator: 0,
+            stopwatch_denominator: 0,
+            registers: [0u64; 12],
+            program_result: ProgramResult::Ok(0),
+            memory_mapping,
+            call_frames: vec![CallFrame::default(); config.max_call_depth],
+            loader,
+            #[cfg(feature = "debugger")]
+            debug_port: None,
+        }
+    }
+
+    /// Execute the program
+    ///
+    /// If interpreted = `false` then the JIT compiled executable is used.
+    pub fn execute_program(
+        &mut self,
+        executable: &Executable<C>,
+        interpreted: bool,
+    ) -> (u64, ProgramResult) {
+        debug_assert!(Arc::ptr_eq(&self.loader, executable.get_loader()));
+        // R1 points to beginning of input memory, R10 to the stack of the first frame, R11 is the pc (hidden)
+        self.registers[1] = ebpf::MM_INPUT_START;
+        self.registers[ebpf::FRAME_PTR_REG] = self.stack_pointer;
+        self.registers[11] = executable.get_entrypoint_instruction_offset() as u64;
+        let config = executable.get_config();
+        let initial_insn_count = if config.enable_instruction_meter {
+            self.context_object_pointer.get_remaining()
+        } else {
+            0
+        };
+        self.previous_instruction_meter = initial_insn_count;
+        self.due_insn_count = 0;
+        self.program_result = ProgramResult::Ok(0);
+        if interpreted {
+            #[cfg(feature = "debugger")]
+            let debug_port = self.debug_port.clone();
+            let mut interpreter = Interpreter::new(self, executable, self.registers);
+            #[cfg(feature = "debugger")]
+            if let Some(debug_port) = debug_port {
+                crate::debugger::execute(&mut interpreter, debug_port);
+            } else {
+                while interpreter.step() {}
+            }
+            #[cfg(not(feature = "debugger"))]
+            while interpreter.step() {}
+        } else {
+            #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
+            {
+                let compiled_program = match executable
+                    .get_compiled_program()
+                    .ok_or_else(|| EbpfError::JitNotCompiled)
+                {
+                    Ok(compiled_program) => compiled_program,
+                    Err(error) => return (0, ProgramResult::Err(error)),
+                };
+                compiled_program.invoke(config, self, self.registers);
+            }
+            #[cfg(not(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64")))]
+            {
+                return (0, ProgramResult::Err(EbpfError::JitNotCompiled));
+            }
+        };
+        let instruction_count = if config.enable_instruction_meter {
+            self.context_object_pointer.consume(self.due_insn_count);
+            initial_insn_count.saturating_sub(self.context_object_pointer.get_remaining())
+        } else {
+            0
+        };
+        let mut result = ProgramResult::Ok(0);
+        std::mem::swap(&mut result, &mut self.program_result);
+        (instruction_count, result)
+    }
+
+    /// Invokes a built-in function
+    pub fn invoke_function(&mut self, function: BuiltinFunction<C>) {
+        function(
+            unsafe {
+                (self as *mut _ as *mut u64).offset(get_runtime_environment_key() as isize)
+                    as *mut _
+            },
+            self.registers[1],
+            self.registers[2],
+            self.registers[3],
self.registers[4], + self.registers[5], + ); + } +} diff --git a/rbpf/src/x86.rs b/rbpf/src/x86.rs new file mode 100644 index 00000000000000..eaaa1384963b59 --- /dev/null +++ b/rbpf/src/x86.rs @@ -0,0 +1,712 @@ +#![allow(clippy::arithmetic_side_effects)] +use crate::{ + jit::{JitCompiler, OperandSize}, + vm::ContextObject, +}; + +macro_rules! exclude_operand_sizes { + ($size:expr, $($to_exclude:path)|+ $(,)?) => { + debug_assert!(match $size { + $($to_exclude)|+ => false, + _ => true, + }); + } +} + +pub const RAX: u8 = 0; +pub const RCX: u8 = 1; +pub const RDX: u8 = 2; +pub const RBX: u8 = 3; +pub const RSP: u8 = 4; +pub const RBP: u8 = 5; +pub const RSI: u8 = 6; +pub const RDI: u8 = 7; +pub const R8: u8 = 8; +pub const R9: u8 = 9; +pub const R10: u8 = 10; +pub const R11: u8 = 11; +pub const R12: u8 = 12; +pub const R13: u8 = 13; +pub const R14: u8 = 14; +pub const R15: u8 = 15; + +// System V AMD64 ABI +// Works on: Linux, macOS, BSD and Solaris but not on Windows +pub const ARGUMENT_REGISTERS: [u8; 6] = [RDI, RSI, RDX, RCX, R8, R9]; +pub const CALLER_SAVED_REGISTERS: [u8; 9] = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11]; +pub const CALLEE_SAVED_REGISTERS: [u8; 6] = [RBP, RBX, R12, R13, R14, R15]; + +struct X86Rex { + w: bool, + r: bool, + x: bool, + b: bool, +} + +struct X86ModRm { + mode: u8, + r: u8, + m: u8, +} + +struct X86Sib { + scale: u8, + index: u8, + base: u8, +} + +#[derive(Copy, Clone)] +pub enum X86IndirectAccess { + /// [second_operand + offset] + Offset(i32), + /// [second_operand + offset + index << shift] + OffsetIndexShift(i32, u8, u8), +} + +#[allow(dead_code)] +#[derive(Copy, Clone)] +pub enum FenceType { + /// lfence + Load = 5, + /// mfence + All = 6, + /// sfence + Store = 7, +} + +#[derive(Copy, Clone)] +pub struct X86Instruction { + size: OperandSize, + opcode_escape_sequence: u8, + opcode: u8, + modrm: bool, + indirect: Option, + first_operand: u8, + second_operand: u8, + immediate_size: OperandSize, + immediate: i64, +} + +impl X86Instruction { + pub const DEFAULT: X86Instruction = X86Instruction { + size: OperandSize::S0, + opcode_escape_sequence: 0, + opcode: 0, + modrm: true, + indirect: None, + first_operand: 0, + second_operand: 0, + immediate_size: OperandSize::S0, + immediate: 0, + }; + + #[inline] + pub fn emit(&self, jit: &mut JitCompiler) { + debug_assert!(!matches!(self.size, OperandSize::S0)); + let mut rex = X86Rex { + w: matches!(self.size, OperandSize::S64), + r: self.first_operand & 0b1000 != 0, + x: false, + b: self.second_operand & 0b1000 != 0, + }; + let mut modrm = X86ModRm { + mode: 0, + r: self.first_operand & 0b111, + m: self.second_operand & 0b111, + }; + let mut sib = X86Sib { + scale: 0, + index: 0, + base: 0, + }; + let mut displacement_size = OperandSize::S0; + let mut displacement = 0; + if self.modrm { + match self.indirect { + Some(X86IndirectAccess::Offset(offset)) => { + displacement = offset; + debug_assert_ne!(self.second_operand & 0b111, RSP); // Reserved for SIB addressing + if (-128..=127).contains(&displacement) + || (displacement == 0 && self.second_operand & 0b111 == RBP) + { + displacement_size = OperandSize::S8; + modrm.mode = 1; + } else { + displacement_size = OperandSize::S32; + modrm.mode = 2; + } + } + Some(X86IndirectAccess::OffsetIndexShift(offset, index, shift)) => { + displacement = offset; + displacement_size = OperandSize::S32; + modrm.mode = 2; + modrm.m = RSP; + rex.x = index & 0b1000 != 0; + sib.scale = shift & 0b11; + sib.index = index & 0b111; + sib.base = self.second_operand & 0b111; + } + None => { + 
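+                    // No memory operand: ModRM.mod = 0b11 selects register-direct addressing.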
modrm.mode = 3; + } + } + } + if matches!(self.size, OperandSize::S16) { + jit.emit::(0x66); + } + let rex = + ((rex.w as u8) << 3) | ((rex.r as u8) << 2) | ((rex.x as u8) << 1) | (rex.b as u8); + if rex != 0 { + jit.emit::(0x40 | rex); + } + match self.opcode_escape_sequence { + 1 => jit.emit::(0x0f), + 2 => jit.emit::(0x0f38), + 3 => jit.emit::(0x0f3a), + _ => {} + } + jit.emit::(self.opcode); + if self.modrm { + jit.emit::((modrm.mode << 6) | (modrm.r << 3) | modrm.m); + let sib = (sib.scale << 6) | (sib.index << 3) | sib.base; + if sib != 0 { + jit.emit::(sib); + } + jit.emit_variable_length(displacement_size, displacement as u64); + } + jit.emit_variable_length(self.immediate_size, self.immediate as u64); + } + + /// Arithmetic or logic + #[inline] + pub const fn alu( + size: OperandSize, + opcode: u8, + source: u8, + destination: u8, + immediate: i64, + indirect: Option, + ) -> Self { + exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16); + Self { + size, + opcode, + first_operand: source, + second_operand: destination, + immediate_size: match opcode { + 0xc1 => OperandSize::S8, + 0x81 => OperandSize::S32, + 0xf7 if source == 0 => OperandSize::S32, + _ => OperandSize::S0, + }, + immediate, + indirect, + ..X86Instruction::DEFAULT + } + } + + /// Move source to destination + #[inline] + pub const fn mov(size: OperandSize, source: u8, destination: u8) -> Self { + exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16); + Self { + size, + opcode: 0x89, + first_operand: source, + second_operand: destination, + ..Self::DEFAULT + } + } + + /// Conditionally move source to destination + #[inline] + pub const fn cmov(size: OperandSize, condition: u8, source: u8, destination: u8) -> Self { + exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16); + Self { + size, + opcode_escape_sequence: 1, + opcode: condition, + first_operand: destination, + second_operand: source, + ..Self::DEFAULT + } + } + + /// Swap source and destination + #[inline] + pub const fn xchg( + size: OperandSize, + source: u8, + destination: u8, + indirect: Option, + ) -> Self { + exclude_operand_sizes!( + size, + OperandSize::S0 | OperandSize::S8 | OperandSize::S16 | OperandSize::S32, + ); + Self { + size, + opcode: 0x87, + first_operand: source, + second_operand: destination, + indirect, + ..Self::DEFAULT + } + } + + /// Swap byte order of destination + #[inline] + pub const fn bswap(size: OperandSize, destination: u8) -> Self { + exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8); + match size { + OperandSize::S16 => Self { + size, + opcode: 0xc1, + second_operand: destination, + immediate_size: OperandSize::S8, + immediate: 8, + ..Self::DEFAULT + }, + OperandSize::S32 | OperandSize::S64 => Self { + size, + opcode_escape_sequence: 1, + opcode: 0xc8 | (destination & 0b111), + modrm: false, + second_operand: destination, + ..Self::DEFAULT + }, + _ => unimplemented!(), + } + } + + /// Test source and destination + #[inline] + pub const fn test( + size: OperandSize, + source: u8, + destination: u8, + indirect: Option, + ) -> Self { + exclude_operand_sizes!(size, OperandSize::S0); + Self { + size, + opcode: if let OperandSize::S8 = size { + 0x84 + } else { + 0x85 + }, + first_operand: source, + second_operand: destination, + indirect, + ..Self::DEFAULT + } + } + + /// Test immediate and destination + #[inline] + pub const fn test_immediate( + size: OperandSize, + destination: u8, + immediate: i64, + indirect: Option, + ) -> Self { 
+ exclude_operand_sizes!(size, OperandSize::S0); + Self { + size, + opcode: if let OperandSize::S8 = size { + 0xf6 + } else { + 0xf7 + }, + first_operand: RAX, + second_operand: destination, + immediate_size: if let OperandSize::S64 = size { + OperandSize::S32 + } else { + size + }, + immediate, + indirect, + ..Self::DEFAULT + } + } + + /// Compare source and destination + #[inline] + pub const fn cmp( + size: OperandSize, + source: u8, + destination: u8, + indirect: Option, + ) -> Self { + exclude_operand_sizes!(size, OperandSize::S0); + Self { + size, + opcode: if let OperandSize::S8 = size { + 0x38 + } else { + 0x39 + }, + first_operand: source, + second_operand: destination, + indirect, + ..Self::DEFAULT + } + } + + /// Compare immediate and destination + #[inline] + pub const fn cmp_immediate( + size: OperandSize, + destination: u8, + immediate: i64, + indirect: Option, + ) -> Self { + exclude_operand_sizes!(size, OperandSize::S0); + Self { + size, + opcode: if let OperandSize::S8 = size { + 0x80 + } else { + 0x81 + }, + first_operand: RDI, + second_operand: destination, + immediate_size: if let OperandSize::S64 = size { + OperandSize::S32 + } else { + size + }, + immediate, + indirect, + ..Self::DEFAULT + } + } + + /// Load effective address of source into destination + #[inline] + pub const fn lea( + size: OperandSize, + source: u8, + destination: u8, + indirect: Option, + ) -> Self { + exclude_operand_sizes!( + size, + OperandSize::S0 | OperandSize::S8 | OperandSize::S16 | OperandSize::S32, + ); + Self { + size, + opcode: 0x8d, + first_operand: destination, + second_operand: source, + indirect, + ..Self::DEFAULT + } + } + + /// Convert word to doubleword or doubleword to quadword + #[inline] + pub const fn sign_extend_rax_rdx(size: OperandSize) -> Self { + exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16); + Self { + size, + opcode: 0x99, + modrm: false, + ..X86Instruction::DEFAULT + } + } + + /// Load destination from [source + offset] + #[inline] + pub const fn load( + size: OperandSize, + source: u8, + destination: u8, + indirect: X86IndirectAccess, + ) -> Self { + exclude_operand_sizes!(size, OperandSize::S0); + Self { + size: if let OperandSize::S64 = size { + OperandSize::S64 + } else { + OperandSize::S32 + }, + opcode_escape_sequence: match size { + OperandSize::S8 | OperandSize::S16 => 1, + _ => 0, + }, + opcode: match size { + OperandSize::S8 => 0xb6, + OperandSize::S16 => 0xb7, + _ => 0x8b, + }, + first_operand: destination, + second_operand: source, + indirect: Some(indirect), + ..Self::DEFAULT + } + } + + /// Store source in [destination + offset] + #[inline] + pub const fn store( + size: OperandSize, + source: u8, + destination: u8, + indirect: X86IndirectAccess, + ) -> Self { + exclude_operand_sizes!(size, OperandSize::S0); + Self { + size, + opcode: match size { + OperandSize::S8 => 0x88, + _ => 0x89, + }, + first_operand: source, + second_operand: destination, + indirect: Some(indirect), + ..Self::DEFAULT + } + } + + /// Load destination from sign-extended immediate + #[inline] + pub const fn load_immediate(size: OperandSize, destination: u8, immediate: i64) -> Self { + exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16); + let immediate_size = if immediate >= i32::MIN as i64 && immediate <= i32::MAX as i64 { + OperandSize::S32 + } else { + OperandSize::S64 + }; + match immediate_size { + OperandSize::S32 => Self { + size, + opcode: 0xc7, + second_operand: destination, + immediate_size: 
OperandSize::S32, + immediate, + ..Self::DEFAULT + }, + OperandSize::S64 => Self { + size, + opcode: 0xb8 | (destination & 0b111), + modrm: false, + second_operand: destination, + immediate_size: OperandSize::S64, + immediate, + ..Self::DEFAULT + }, + _ => unimplemented!(), + } + } + + /// Store sign-extended immediate in destination + #[inline] + pub const fn store_immediate( + size: OperandSize, + destination: u8, + indirect: X86IndirectAccess, + immediate: i64, + ) -> Self { + exclude_operand_sizes!(size, OperandSize::S0); + Self { + size, + opcode: match size { + OperandSize::S8 => 0xc6, + _ => 0xc7, + }, + second_operand: destination, + indirect: Some(indirect), + immediate_size: if let OperandSize::S64 = size { + OperandSize::S32 + } else { + size + }, + immediate, + ..Self::DEFAULT + } + } + + /// Push source onto the stack + #[allow(dead_code)] + #[inline] + pub const fn push_immediate(size: OperandSize, immediate: i32) -> Self { + exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S16); + Self { + size, + opcode: match size { + OperandSize::S8 => 0x6A, + _ => 0x68, + }, + modrm: false, + immediate_size: if let OperandSize::S64 = size { + OperandSize::S32 + } else { + size + }, + immediate: immediate as i64, + ..Self::DEFAULT + } + } + + /// Push source onto the stack + #[inline] + pub const fn push(source: u8, indirect: Option) -> Self { + if indirect.is_none() { + Self { + size: OperandSize::S32, + opcode: 0x50 | (source & 0b111), + modrm: false, + second_operand: source, + ..Self::DEFAULT + } + } else { + Self { + size: OperandSize::S64, + opcode: 0xFF, + modrm: true, + first_operand: 6, + second_operand: source, + indirect, + ..Self::DEFAULT + } + } + } + + /// Pop from the stack into destination + #[inline] + pub const fn pop(destination: u8) -> Self { + Self { + size: OperandSize::S32, + opcode: 0x58 | (destination & 0b111), + modrm: false, + second_operand: destination, + ..Self::DEFAULT + } + } + + /// Jump to relative destination on condition + #[inline] + pub const fn conditional_jump_immediate(opcode: u8, relative_destination: i32) -> Self { + Self { + size: OperandSize::S32, + opcode_escape_sequence: 1, + opcode, + modrm: false, + immediate_size: OperandSize::S32, + immediate: relative_destination as i64, + ..Self::DEFAULT + } + } + + /// Jump to relative destination + #[inline] + pub const fn jump_immediate(relative_destination: i32) -> Self { + Self { + size: OperandSize::S32, + opcode: 0xe9, + modrm: false, + immediate_size: OperandSize::S32, + immediate: relative_destination as i64, + ..Self::DEFAULT + } + } + + /// Push RIP and jump to relative destination + #[inline] + pub const fn call_immediate(relative_destination: i32) -> Self { + Self { + size: OperandSize::S32, + opcode: 0xe8, + modrm: false, + immediate_size: OperandSize::S32, + immediate: relative_destination as i64, + ..Self::DEFAULT + } + } + + /// Push RIP and jump to absolute destination + #[inline] + pub const fn call_reg(destination: u8, indirect: Option) -> Self { + Self { + size: OperandSize::S64, + opcode: 0xff, + first_operand: 2, + second_operand: destination, + indirect, + ..Self::DEFAULT + } + } + + /// Pop RIP + #[inline] + pub const fn return_near() -> Self { + Self { + size: OperandSize::S32, + opcode: 0xc3, + modrm: false, + ..Self::DEFAULT + } + } + + /// No operation + #[allow(dead_code)] + #[inline] + pub const fn noop() -> Self { + Self { + size: OperandSize::S32, + opcode: 0x90, + modrm: false, + ..Self::DEFAULT + } + } + + /// Trap / software interrupt + #[allow(dead_code)] 
+ #[inline] + pub const fn interrupt(immediate: u8) -> Self { + if immediate == 3 { + Self { + size: OperandSize::S32, + opcode: 0xcc, + modrm: false, + ..Self::DEFAULT + } + } else { + Self { + size: OperandSize::S32, + opcode: 0xcd, + modrm: false, + immediate_size: OperandSize::S8, + immediate: immediate as i64, + ..Self::DEFAULT + } + } + } + + /// rdtsc + #[inline] + pub const fn cycle_count() -> Self { + Self { + size: OperandSize::S32, + opcode_escape_sequence: 1, + opcode: 0x31, + modrm: false, + ..Self::DEFAULT + } + } + + /// lfence / sfence / mfence + #[allow(dead_code)] + #[inline] + pub const fn fence(fence_type: FenceType) -> Self { + Self { + size: OperandSize::S32, + opcode_escape_sequence: 1, + opcode: 0xae, + first_operand: fence_type as u8, + ..Self::DEFAULT + } + } +} diff --git a/rbpf/test_utils/Cargo.lock b/rbpf/test_utils/Cargo.lock new file mode 100644 index 00000000000000..b1b098d8d035f2 --- /dev/null +++ b/rbpf/test_utils/Cargo.lock @@ -0,0 +1,266 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "ascii" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "combine" +version = "3.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da3da6baa321ec19e1cc41d31bf599f00c783d0517095cdaf0332e3fe8d20680" +dependencies = [ + "ascii", + "byteorder", + "either", + "memchr", + "unreachable", +] + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "getrandom" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi", +] + +[[package]] +name = "goblin" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c955ab4e0ad8c843ea653a3d143048b87490d9be56bd7132a435c2407846ac8f" +dependencies = [ + "log", + "plain", + "scroll", +] + +[[package]] +name = "hash32" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +dependencies = [ + "byteorder", +] + +[[package]] +name = "libc" +version = "0.2.122" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec647867e2bf0772e28c8bcde4f0d19a9216916e890543b5a03ed8ef27b8f259" + +[[package]] +name = "log" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +dependencies = [ + "cfg-if 0.1.10", +] + +[[package]] +name 
= "memchr" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" + +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + +[[package]] +name = "ppv-lite86" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20" + +[[package]] +name = "proc-macro2" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" + +[[package]] +name = "scroll" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" +dependencies = [ + "scroll_derive", +] + +[[package]] +name = "scroll_derive" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "solana_rbpf" +version = "0.8.0" +dependencies = [ + "byteorder", + "combine", + "goblin", + "hash32", + "libc", + "log", + "rand", + "rustc-demangle", + "scroll", + "thiserror", +] + +[[package]] +name = "syn" +version = "1.0.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea9c5432ff16d6152371f808fb5a871cd67368171b09bb21b43df8e4a47a3556" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "test_utils" +version = "0.8.0" +dependencies = [ + "libc", + "solana_rbpf", +] + +[[package]] +name = "thiserror" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745" +dependencies = [ + "proc-macro2", + "quote", + 
"syn", +] + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "unreachable" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" +dependencies = [ + "void", +] + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" diff --git a/rbpf/test_utils/Cargo.toml b/rbpf/test_utils/Cargo.toml new file mode 100644 index 00000000000000..4d9dd495f01b47 --- /dev/null +++ b/rbpf/test_utils/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "test_utils" +version = "0.8.0" +authors = ["Solana Maintainers "] +edition = "2018" +publish = false + +[dependencies] +libc = "0.2" +solana_rbpf = { path = "../" } diff --git a/rbpf/test_utils/src/lib.rs b/rbpf/test_utils/src/lib.rs new file mode 100644 index 00000000000000..7c56eb571cd097 --- /dev/null +++ b/rbpf/test_utils/src/lib.rs @@ -0,0 +1,225 @@ +// Converted from the tests for uBPF +// Copyright 2015 Big Switch Networks, Inc +// Copyright 2016 6WIND S.A. +// +// Licensed under the Apache License, Version 2.0 or +// the MIT license , at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +#![allow(dead_code)] + +use solana_rbpf::{ + aligned_memory::AlignedMemory, + ebpf::{self, HOST_ALIGN}, + elf::Executable, + error::EbpfError, + memory_region::{MemoryCowCallback, MemoryMapping, MemoryRegion}, + vm::ContextObject, +}; + +// Assembly code and data for tcp_sack testcases. 
+ +pub const PROG_TCP_PORT_80: &str = " + ldxb r2, [r1+0xc] + ldxb r3, [r1+0xd] + lsh64 r3, 0x8 + or64 r3, r2 + mov64 r0, 0x0 + jne r3, 0x8, +0xc + ldxb r2, [r1+0x17] + jne r2, 0x6, +0xa + ldxb r2, [r1+0xe] + add64 r1, 0xe + and64 r2, 0xf + lsh64 r2, 0x2 + add64 r1, r2 + ldxh r2, [r1+0x2] + jeq r2, 0x5000, +0x2 + ldxh r1, [r1+0x0] + jne r1, 0x5000, +0x1 + mov64 r0, 0x1 + exit"; + +pub const TCP_SACK_ASM: &str = " + ldxb r2, [r1+12] + ldxb r3, [r1+13] + lsh r3, 0x8 + or r3, r2 + mov r0, 0x0 + jne r3, 0x8, +37 + ldxb r2, [r1+23] + jne r2, 0x6, +35 + ldxb r2, [r1+14] + add r1, 0xe + and r2, 0xf + lsh r2, 0x2 + add r1, r2 + mov r0, 0x0 + ldxh r4, [r1+12] + add r1, 0x14 + rsh r4, 0x2 + and r4, 0x3c + mov r2, r4 + add r2, -20 + mov r5, 0x15 + mov r3, 0x0 + jgt r5, r4, +20 + mov r5, r3 + lsh r5, 0x20 + arsh r5, 0x20 + mov r4, r1 + add r4, r5 + ldxb r5, [r4] + jeq r5, 0x1, +4 + jeq r5, 0x0, +12 + mov r6, r3 + jeq r5, 0x5, +9 + ja +2 + add r3, 0x1 + mov r6, r3 + ldxb r3, [r4+1] + add r3, r6 + lsh r3, 0x20 + arsh r3, 0x20 + jsgt r2, r3, -18 + ja +1 + mov r0, 0x1 + exit"; + +pub const TCP_SACK_BIN: [u8; 352] = [ + 0x71, 0x12, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x71, 0x13, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x67, 0x03, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, // + 0x4f, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x55, 0x03, 0x25, 0x00, 0x08, 0x00, 0x00, 0x00, // + 0x71, 0x12, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x55, 0x02, 0x23, 0x00, 0x06, 0x00, 0x00, 0x00, // + 0x71, 0x12, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x07, 0x01, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, // + 0x57, 0x02, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, // + 0x67, 0x02, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // + 0x0f, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x69, 0x14, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x07, 0x01, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, // + 0x77, 0x04, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // + 0x57, 0x04, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, // + 0xbf, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x07, 0x02, 0x00, 0x00, 0xec, 0xff, 0xff, 0xff, // + 0xb7, 0x05, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, // + 0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x2d, 0x45, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0xbf, 0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x67, 0x05, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, // + 0xc7, 0x05, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, // + 0xbf, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x0f, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x71, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x15, 0x05, 0x04, 0x00, 0x01, 0x00, 0x00, 0x00, // + 0x15, 0x05, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0xbf, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x15, 0x05, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00, // + 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x07, 0x03, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // + 0xbf, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x71, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x0f, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x67, 0x03, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, // + 0xc7, 0x03, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, // + 0x6d, 0x32, 0xee, 0xff, 0x00, 0x00, 0x00, 0x00, // + 0x05, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0xb7, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // + 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // +]; + +pub const TCP_SACK_MATCH: [u8; 78] = [ + 0x00, 0x26, 0x62, 0x2f, 0x47, 0x87, 0x00, 0x1d, // + 
0x60, 0xb3, 0x01, 0x84, 0x08, 0x00, 0x45, 0x00, // + 0x00, 0x40, 0xa8, 0xde, 0x40, 0x00, 0x40, 0x06, // + 0x9d, 0x58, 0xc0, 0xa8, 0x01, 0x03, 0x3f, 0x74, // + 0xf3, 0x61, 0xe5, 0xc0, 0x00, 0x50, 0xe5, 0x94, // + 0x3f, 0x77, 0xa3, 0xc4, 0xc4, 0x80, 0xb0, 0x10, // + 0x01, 0x3e, 0x34, 0xb6, 0x00, 0x00, 0x01, 0x01, // + 0x08, 0x0a, 0x00, 0x17, 0x95, 0x6f, 0x8d, 0x9d, // + 0x9e, 0x27, 0x01, 0x01, 0x05, 0x0a, 0xa3, 0xc4, // + 0xca, 0x28, 0xa3, 0xc4, 0xcf, 0xd0, // +]; + +pub const TCP_SACK_NOMATCH: [u8; 66] = [ + 0x00, 0x26, 0x62, 0x2f, 0x47, 0x87, 0x00, 0x1d, // + 0x60, 0xb3, 0x01, 0x84, 0x08, 0x00, 0x45, 0x00, // + 0x00, 0x40, 0xa8, 0xde, 0x40, 0x00, 0x40, 0x06, // + 0x9d, 0x58, 0xc0, 0xa8, 0x01, 0x03, 0x3f, 0x74, // + 0xf3, 0x61, 0xe5, 0xc0, 0x00, 0x50, 0xe5, 0x94, // + 0x3f, 0x77, 0xa3, 0xc4, 0xc4, 0x80, 0x80, 0x10, // + 0x01, 0x3e, 0x34, 0xb6, 0x00, 0x00, 0x01, 0x01, // + 0x08, 0x0a, 0x00, 0x17, 0x95, 0x6f, 0x8d, 0x9d, // + 0x9e, 0x27, // +]; + +pub fn create_memory_mapping<'a, C: ContextObject>( + executable: &'a Executable, + stack: &'a mut AlignedMemory<{ HOST_ALIGN }>, + heap: &'a mut AlignedMemory<{ HOST_ALIGN }>, + additional_regions: Vec, + cow_cb: Option, +) -> Result, EbpfError> { + let config = executable.get_config(); + let sbpf_version = executable.get_sbpf_version(); + let regions: Vec = vec![ + executable.get_ro_region(), + MemoryRegion::new_writable_gapped( + stack.as_slice_mut(), + ebpf::MM_STACK_START, + if !sbpf_version.dynamic_stack_frames() && config.enable_stack_frame_gaps { + config.stack_frame_size as u64 + } else { + 0 + }, + ), + MemoryRegion::new_writable(heap.as_slice_mut(), ebpf::MM_HEAP_START), + ] + .into_iter() + .chain(additional_regions.into_iter()) + .collect(); + + Ok(if let Some(cow_cb) = cow_cb { + MemoryMapping::new_with_cow(regions, cow_cb, config, sbpf_version)? + } else { + MemoryMapping::new(regions, config, sbpf_version)? + }) +} + +#[macro_export] +macro_rules! create_vm { + ($vm_name:ident, $verified_executable:expr, $context_object:expr, $stack:ident, $heap:ident, $additional_regions:expr, $cow_cb:expr) => { + let mut $stack = solana_rbpf::aligned_memory::AlignedMemory::zero_filled( + $verified_executable.get_config().stack_size(), + ); + let mut $heap = solana_rbpf::aligned_memory::AlignedMemory::with_capacity(0); + let stack_len = $stack.len(); + let memory_mapping = test_utils::create_memory_mapping( + $verified_executable, + &mut $stack, + &mut $heap, + $additional_regions, + $cow_cb, + ) + .unwrap(); + let mut $vm_name = solana_rbpf::vm::EbpfVm::new( + $verified_executable.get_loader().clone(), + $verified_executable.get_sbpf_version(), + $context_object, + memory_mapping, + stack_len, + ); + }; +} + +#[macro_export] +macro_rules! assert_error { + ($result:expr, $($error:expr),+) => { + assert!(format!("{:?}", $result).contains(&format!($($error),+))); + } +} diff --git a/rbpf/tests/assembler.rs b/rbpf/tests/assembler.rs new file mode 100644 index 00000000000000..86f92921978559 --- /dev/null +++ b/rbpf/tests/assembler.rs @@ -0,0 +1,560 @@ +#![allow(clippy::arithmetic_side_effects)] +// Copyright 2017 Rich Lane +// +// Licensed under the Apache License, Version 2.0 or +// the MIT license , at your option. This file may not be +// copied, modified, or distributed except according to those terms. 
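+
+// Editor's note (illustrative, not from the upstream sources): each test below assembles a
+// mnemonic and checks the decoded instruction words via the `asm` and `insn` helpers that
+// follow, e.g. a single `exit` becomes one 8-byte instruction:
+//
+//     assert_eq!(asm("exit"), Ok(vec![insn(0, ebpf::EXIT, 0, 0, 0, 0)]));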
+ +extern crate solana_rbpf; +extern crate test_utils; + +use solana_rbpf::{assembler::assemble, ebpf, program::BuiltinProgram, vm::TestContextObject}; +use std::sync::Arc; +use test_utils::{TCP_SACK_ASM, TCP_SACK_BIN}; + +fn asm(src: &str) -> Result, String> { + let executable = assemble::(src, Arc::new(BuiltinProgram::new_mock()))?; + let (_program_vm_addr, program) = executable.get_text_bytes(); + Ok((0..program.len() / ebpf::INSN_SIZE) + .map(|insn_ptr| ebpf::get_insn(program, insn_ptr)) + .collect()) +} + +fn insn(ptr: usize, opc: u8, dst: u8, src: u8, off: i16, imm: i64) -> ebpf::Insn { + ebpf::Insn { + ptr, + opc, + dst, + src, + off, + imm, + } +} + +#[test] +fn test_empty() { + assert_eq!(asm(""), Ok(vec![])); +} + +// Example for InstructionType::NoOperand. +#[test] +fn test_exit() { + assert_eq!(asm("exit"), Ok(vec![insn(0, ebpf::EXIT, 0, 0, 0, 0)])); +} + +// Example for InstructionType::AluBinary. +#[test] +fn test_add64() { + assert_eq!( + asm("add64 r1, r3"), + Ok(vec![insn(0, ebpf::ADD64_REG, 1, 3, 0, 0)]) + ); + assert_eq!( + asm("add64 r1, 5"), + Ok(vec![insn(0, ebpf::ADD64_IMM, 1, 0, 0, 5)]) + ); +} + +// Example for InstructionType::AluUnary. +#[test] +fn test_neg64() { + assert_eq!(asm("neg64 r1"), Ok(vec![insn(0, ebpf::NEG64, 1, 0, 0, 0)])); +} + +// Example for InstructionType::LoadReg. +#[test] +fn test_ldxw() { + assert_eq!( + asm("ldxw r1, [r2+5]"), + Ok(vec![insn(0, ebpf::LD_W_REG, 1, 2, 5, 0)]) + ); +} + +// Example for InstructionType::StoreImm. +#[test] +fn test_stw() { + assert_eq!( + asm("stw [r2+5], 7"), + Ok(vec![insn(0, ebpf::ST_W_IMM, 2, 0, 5, 7)]) + ); +} + +// Example for InstructionType::StoreReg. +#[test] +fn test_stxw() { + assert_eq!( + asm("stxw [r2+5], r8"), + Ok(vec![insn(0, ebpf::ST_W_REG, 2, 8, 5, 0)]) + ); +} + +// Example for InstructionType::JumpUnconditional. +#[test] +fn test_ja() { + assert_eq!(asm("ja +8"), Ok(vec![insn(0, ebpf::JA, 0, 0, 8, 0)])); + assert_eq!(asm("ja -3"), Ok(vec![insn(0, ebpf::JA, 0, 0, -3, 0)])); +} + +// Example for InstructionType::JumpConditional. +#[test] +fn test_jeq() { + assert_eq!( + asm("jeq r1, 4, +8"), + Ok(vec![insn(0, ebpf::JEQ_IMM, 1, 0, 8, 4)]) + ); + assert_eq!( + asm("jeq r1, r3, +8"), + Ok(vec![insn(0, ebpf::JEQ_REG, 1, 3, 8, 0)]) + ); +} + +#[test] +fn test_call_reg() { + assert_eq!( + asm("callx r3"), + Ok(vec![insn(0, ebpf::CALL_REG, 0, 3, 0, 0)]) + ); +} + +// Example for InstructionType::Call. +#[test] +fn test_call_imm() { + assert_eq!( + asm("call 299"), + Ok(vec![insn(0, ebpf::CALL_IMM, 0, 1, 0, 300)]) + ); +} + +// Example for InstructionType::Endian. +#[test] +fn test_be32() { + assert_eq!(asm("be32 r1"), Ok(vec![insn(0, ebpf::BE, 1, 0, 0, 32)])); +} + +// Example for InstructionType::LoadImm. +#[test] +fn test_lddw() { + assert_eq!( + asm("lddw r1, 0x1234abcd5678eeff"), + Ok(vec![ + insn(0, ebpf::LD_DW_IMM, 1, 0, 0, 0x5678eeff), + insn(1, 0, 0, 0, 0, 0x1234abcd) + ]) + ); + assert_eq!( + asm("lddw r1, 0xff11ee22dd33cc44"), + Ok(vec![ + insn(0, ebpf::LD_DW_IMM, 1, 0, 0, 0xffffffffdd33cc44u64 as i64), + insn(1, 0, 0, 0, 0, 0xffffffffff11ee22u64 as i64) + ]) + ); +} + +// Example for InstructionType::LoadReg. +#[test] +fn test_ldxdw() { + assert_eq!( + asm("ldxdw r1, [r2+3]"), + Ok(vec![insn(0, ebpf::LD_DW_REG, 1, 2, 3, 0)]) + ); +} + +// Example for InstructionType::StoreImm. +#[test] +fn test_sth() { + assert_eq!( + asm("sth [r1+2], 3"), + Ok(vec![insn(0, ebpf::ST_H_IMM, 1, 0, 2, 3)]) + ); +} + +// Example for InstructionType::StoreReg. 
+#[test] +fn test_stxh() { + assert_eq!( + asm("stxh [r1+2], r3"), + Ok(vec![insn(0, ebpf::ST_H_REG, 1, 3, 2, 0)]) + ); +} + +// Test all supported AluBinary mnemonics. +#[test] +fn test_alu_binary() { + assert_eq!( + asm("add r1, r2 + sub r1, r2 + mul r1, r2 + div r1, r2 + or r1, r2 + and r1, r2 + lsh r1, r2 + rsh r1, r2 + mod r1, r2 + xor r1, r2 + mov r1, r2 + arsh r1, r2"), + Ok(vec![ + insn(0, ebpf::ADD64_REG, 1, 2, 0, 0), + insn(1, ebpf::SUB64_REG, 1, 2, 0, 0), + insn(2, ebpf::MUL64_REG, 1, 2, 0, 0), + insn(3, ebpf::DIV64_REG, 1, 2, 0, 0), + insn(4, ebpf::OR64_REG, 1, 2, 0, 0), + insn(5, ebpf::AND64_REG, 1, 2, 0, 0), + insn(6, ebpf::LSH64_REG, 1, 2, 0, 0), + insn(7, ebpf::RSH64_REG, 1, 2, 0, 0), + insn(8, ebpf::MOD64_REG, 1, 2, 0, 0), + insn(9, ebpf::XOR64_REG, 1, 2, 0, 0), + insn(10, ebpf::MOV64_REG, 1, 2, 0, 0), + insn(11, ebpf::ARSH64_REG, 1, 2, 0, 0) + ]) + ); + + assert_eq!( + asm("add r1, 2 + sub r1, 2 + mul r1, 2 + div r1, 2 + or r1, 2 + and r1, 2 + lsh r1, 2 + rsh r1, 2 + mod r1, 2 + xor r1, 2 + mov r1, 2 + arsh r1, 2"), + Ok(vec![ + insn(0, ebpf::ADD64_IMM, 1, 0, 0, 2), + insn(1, ebpf::SUB64_IMM, 1, 0, 0, 2), + insn(2, ebpf::MUL64_IMM, 1, 0, 0, 2), + insn(3, ebpf::DIV64_IMM, 1, 0, 0, 2), + insn(4, ebpf::OR64_IMM, 1, 0, 0, 2), + insn(5, ebpf::AND64_IMM, 1, 0, 0, 2), + insn(6, ebpf::LSH64_IMM, 1, 0, 0, 2), + insn(7, ebpf::RSH64_IMM, 1, 0, 0, 2), + insn(8, ebpf::MOD64_IMM, 1, 0, 0, 2), + insn(9, ebpf::XOR64_IMM, 1, 0, 0, 2), + insn(10, ebpf::MOV64_IMM, 1, 0, 0, 2), + insn(11, ebpf::ARSH64_IMM, 1, 0, 0, 2) + ]) + ); + + assert_eq!( + asm("add64 r1, r2 + sub64 r1, r2 + mul64 r1, r2 + div64 r1, r2 + or64 r1, r2 + and64 r1, r2 + lsh64 r1, r2 + rsh64 r1, r2 + mod64 r1, r2 + xor64 r1, r2 + mov64 r1, r2 + arsh64 r1, r2"), + Ok(vec![ + insn(0, ebpf::ADD64_REG, 1, 2, 0, 0), + insn(1, ebpf::SUB64_REG, 1, 2, 0, 0), + insn(2, ebpf::MUL64_REG, 1, 2, 0, 0), + insn(3, ebpf::DIV64_REG, 1, 2, 0, 0), + insn(4, ebpf::OR64_REG, 1, 2, 0, 0), + insn(5, ebpf::AND64_REG, 1, 2, 0, 0), + insn(6, ebpf::LSH64_REG, 1, 2, 0, 0), + insn(7, ebpf::RSH64_REG, 1, 2, 0, 0), + insn(8, ebpf::MOD64_REG, 1, 2, 0, 0), + insn(9, ebpf::XOR64_REG, 1, 2, 0, 0), + insn(10, ebpf::MOV64_REG, 1, 2, 0, 0), + insn(11, ebpf::ARSH64_REG, 1, 2, 0, 0) + ]) + ); + + assert_eq!( + asm("add64 r1, 2 + sub64 r1, 2 + mul64 r1, 2 + div64 r1, 2 + or64 r1, 2 + and64 r1, 2 + lsh64 r1, 2 + rsh64 r1, 2 + mod64 r1, 2 + xor64 r1, 2 + mov64 r1, 2 + arsh64 r1, 2"), + Ok(vec![ + insn(0, ebpf::ADD64_IMM, 1, 0, 0, 2), + insn(1, ebpf::SUB64_IMM, 1, 0, 0, 2), + insn(2, ebpf::MUL64_IMM, 1, 0, 0, 2), + insn(3, ebpf::DIV64_IMM, 1, 0, 0, 2), + insn(4, ebpf::OR64_IMM, 1, 0, 0, 2), + insn(5, ebpf::AND64_IMM, 1, 0, 0, 2), + insn(6, ebpf::LSH64_IMM, 1, 0, 0, 2), + insn(7, ebpf::RSH64_IMM, 1, 0, 0, 2), + insn(8, ebpf::MOD64_IMM, 1, 0, 0, 2), + insn(9, ebpf::XOR64_IMM, 1, 0, 0, 2), + insn(10, ebpf::MOV64_IMM, 1, 0, 0, 2), + insn(11, ebpf::ARSH64_IMM, 1, 0, 0, 2) + ]) + ); + + assert_eq!( + asm("add32 r1, r2 + sub32 r1, r2 + mul32 r1, r2 + div32 r1, r2 + or32 r1, r2 + and32 r1, r2 + lsh32 r1, r2 + rsh32 r1, r2 + mod32 r1, r2 + xor32 r1, r2 + mov32 r1, r2 + arsh32 r1, r2"), + Ok(vec![ + insn(0, ebpf::ADD32_REG, 1, 2, 0, 0), + insn(1, ebpf::SUB32_REG, 1, 2, 0, 0), + insn(2, ebpf::MUL32_REG, 1, 2, 0, 0), + insn(3, ebpf::DIV32_REG, 1, 2, 0, 0), + insn(4, ebpf::OR32_REG, 1, 2, 0, 0), + insn(5, ebpf::AND32_REG, 1, 2, 0, 0), + insn(6, ebpf::LSH32_REG, 1, 2, 0, 0), + insn(7, ebpf::RSH32_REG, 1, 2, 0, 0), + insn(8, ebpf::MOD32_REG, 1, 2, 0, 0), + insn(9, ebpf::XOR32_REG, 
1, 2, 0, 0), + insn(10, ebpf::MOV32_REG, 1, 2, 0, 0), + insn(11, ebpf::ARSH32_REG, 1, 2, 0, 0) + ]) + ); + + assert_eq!( + asm("add32 r1, 2 + sub32 r1, 2 + mul32 r1, 2 + div32 r1, 2 + or32 r1, 2 + and32 r1, 2 + lsh32 r1, 2 + rsh32 r1, 2 + mod32 r1, 2 + xor32 r1, 2 + mov32 r1, 2 + arsh32 r1, 2"), + Ok(vec![ + insn(0, ebpf::ADD32_IMM, 1, 0, 0, 2), + insn(1, ebpf::SUB32_IMM, 1, 0, 0, 2), + insn(2, ebpf::MUL32_IMM, 1, 0, 0, 2), + insn(3, ebpf::DIV32_IMM, 1, 0, 0, 2), + insn(4, ebpf::OR32_IMM, 1, 0, 0, 2), + insn(5, ebpf::AND32_IMM, 1, 0, 0, 2), + insn(6, ebpf::LSH32_IMM, 1, 0, 0, 2), + insn(7, ebpf::RSH32_IMM, 1, 0, 0, 2), + insn(8, ebpf::MOD32_IMM, 1, 0, 0, 2), + insn(9, ebpf::XOR32_IMM, 1, 0, 0, 2), + insn(10, ebpf::MOV32_IMM, 1, 0, 0, 2), + insn(11, ebpf::ARSH32_IMM, 1, 0, 0, 2) + ]) + ); +} + +// Test all supported AluUnary mnemonics. +#[test] +fn test_alu_unary() { + assert_eq!( + asm("neg r1 + neg64 r1 + neg32 r1"), + Ok(vec![ + insn(0, ebpf::NEG64, 1, 0, 0, 0), + insn(1, ebpf::NEG64, 1, 0, 0, 0), + insn(2, ebpf::NEG32, 1, 0, 0, 0) + ]) + ); +} + +// Test all supported LoadReg mnemonics. +#[test] +fn test_load_reg() { + assert_eq!( + asm("ldxw r1, [r2+3] + ldxh r1, [r2+3] + ldxb r1, [r2+3] + ldxdw r1, [r2+3]"), + Ok(vec![ + insn(0, ebpf::LD_W_REG, 1, 2, 3, 0), + insn(1, ebpf::LD_H_REG, 1, 2, 3, 0), + insn(2, ebpf::LD_B_REG, 1, 2, 3, 0), + insn(3, ebpf::LD_DW_REG, 1, 2, 3, 0) + ]) + ); +} + +// Test all supported StoreImm mnemonics. +#[test] +fn test_store_imm() { + assert_eq!( + asm("stw [r1+2], 3 + sth [r1+2], 3 + stb [r1+2], 3 + stdw [r1+2], 3"), + Ok(vec![ + insn(0, ebpf::ST_W_IMM, 1, 0, 2, 3), + insn(1, ebpf::ST_H_IMM, 1, 0, 2, 3), + insn(2, ebpf::ST_B_IMM, 1, 0, 2, 3), + insn(3, ebpf::ST_DW_IMM, 1, 0, 2, 3) + ]) + ); +} + +// Test all supported StoreReg mnemonics. +#[test] +fn test_store_reg() { + assert_eq!( + asm("stxw [r1+2], r3 + stxh [r1+2], r3 + stxb [r1+2], r3 + stxdw [r1+2], r3"), + Ok(vec![ + insn(0, ebpf::ST_W_REG, 1, 3, 2, 0), + insn(1, ebpf::ST_H_REG, 1, 3, 2, 0), + insn(2, ebpf::ST_B_REG, 1, 3, 2, 0), + insn(3, ebpf::ST_DW_REG, 1, 3, 2, 0) + ]) + ); +} + +// Test all supported JumpConditional mnemonics. +#[test] +fn test_jump_conditional() { + assert_eq!( + asm("jeq r1, r2, +3 + jgt r1, r2, +3 + jge r1, r2, +3 + jlt r1, r2, +3 + jle r1, r2, +3 + jset r1, r2, +3 + jne r1, r2, +3 + jsgt r1, r2, +3 + jsge r1, r2, +3 + jslt r1, r2, +3 + jsle r1, r2, +3"), + Ok(vec![ + insn(0, ebpf::JEQ_REG, 1, 2, 3, 0), + insn(1, ebpf::JGT_REG, 1, 2, 3, 0), + insn(2, ebpf::JGE_REG, 1, 2, 3, 0), + insn(3, ebpf::JLT_REG, 1, 2, 3, 0), + insn(4, ebpf::JLE_REG, 1, 2, 3, 0), + insn(5, ebpf::JSET_REG, 1, 2, 3, 0), + insn(6, ebpf::JNE_REG, 1, 2, 3, 0), + insn(7, ebpf::JSGT_REG, 1, 2, 3, 0), + insn(8, ebpf::JSGE_REG, 1, 2, 3, 0), + insn(9, ebpf::JSLT_REG, 1, 2, 3, 0), + insn(10, ebpf::JSLE_REG, 1, 2, 3, 0) + ]) + ); + + assert_eq!( + asm("jeq r1, 2, +3 + jgt r1, 2, +3 + jge r1, 2, +3 + jlt r1, 2, +3 + jle r1, 2, +3 + jset r1, 2, +3 + jne r1, 2, +3 + jsgt r1, 2, +3 + jsge r1, 2, +3 + jslt r1, 2, +3 + jsle r1, 2, +3"), + Ok(vec![ + insn(0, ebpf::JEQ_IMM, 1, 0, 3, 2), + insn(1, ebpf::JGT_IMM, 1, 0, 3, 2), + insn(2, ebpf::JGE_IMM, 1, 0, 3, 2), + insn(3, ebpf::JLT_IMM, 1, 0, 3, 2), + insn(4, ebpf::JLE_IMM, 1, 0, 3, 2), + insn(5, ebpf::JSET_IMM, 1, 0, 3, 2), + insn(6, ebpf::JNE_IMM, 1, 0, 3, 2), + insn(7, ebpf::JSGT_IMM, 1, 0, 3, 2), + insn(8, ebpf::JSGE_IMM, 1, 0, 3, 2), + insn(9, ebpf::JSLT_IMM, 1, 0, 3, 2), + insn(10, ebpf::JSLE_IMM, 1, 0, 3, 2) + ]) + ); +} + +// Test all supported Endian mnemonics. 
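+// be and le each reuse a single opcode (ebpf::BE / ebpf::LE); the operand +// width travels in the immediate field, so the expected instructions below +// differ only in imm (16, 32 or 64).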
+#[test] +fn test_endian() { + assert_eq!( + asm("be16 r1 + be32 r1 + be64 r1 + le16 r1 + le32 r1 + le64 r1"), + Ok(vec![ + insn(0, ebpf::BE, 1, 0, 0, 16), + insn(1, ebpf::BE, 1, 0, 0, 32), + insn(2, ebpf::BE, 1, 0, 0, 64), + insn(3, ebpf::LE, 1, 0, 0, 16), + insn(4, ebpf::LE, 1, 0, 0, 32), + insn(5, ebpf::LE, 1, 0, 0, 64) + ]) + ); +} + +#[test] +fn test_large_immediate() { + assert_eq!( + asm("add64 r1, 2147483647"), + Ok(vec![insn(0, ebpf::ADD64_IMM, 1, 0, 0, 2147483647)]) + ); + assert_eq!( + asm("add64 r1, -2147483648"), + Ok(vec![insn(0, ebpf::ADD64_IMM, 1, 0, 0, -2147483648)]) + ); +} + +#[test] +fn test_tcp_sack() { + let executable = + assemble::<TestContextObject>(TCP_SACK_ASM, Arc::new(BuiltinProgram::new_mock())).unwrap(); + let (_program_vm_addr, program) = executable.get_text_bytes(); + assert_eq!(program, TCP_SACK_BIN.to_vec()); +} + +#[test] +fn test_error_invalid_instruction() { + assert_eq!(asm("abcd"), Err("Invalid instruction \"abcd\"".to_string())); +} + +#[test] +fn test_error_unexpected_operands() { + assert_eq!( + asm("add 1, 2"), + Err("Unexpected operands: [Integer(1), Integer(2)]".to_string()) + ); +} + +#[test] +fn test_error_operands_out_of_range() { + assert_eq!( + asm("add r16, r2"), + Err("Invalid destination register 16".to_string()) + ); + assert_eq!( + asm("add r1, r16"), + Err("Invalid source register 16".to_string()) + ); + assert_eq!(asm("ja -32769"), Err("Invalid offset -32769".to_string())); + assert_eq!(asm("ja 32768"), Err("Invalid offset 32768".to_string())); + assert_eq!( + asm("add r1, 4294967296"), + Err("Invalid immediate 4294967296".to_string()) + ); + assert_eq!( + asm("add r1, 2147483648"), + Err("Invalid immediate 2147483648".to_string()) + ); + assert_eq!( + asm("add r1, -2147483649"), + Err("Invalid immediate -2147483649".to_string()) + ); +} diff --git a/rbpf/tests/disassembler.rs b/rbpf/tests/disassembler.rs new file mode 100644 index 00000000000000..a3080b31e6e485 --- /dev/null +++ b/rbpf/tests/disassembler.rs @@ -0,0 +1,341 @@ +// Copyright 2017 Jan-Erik Rediger +// +// Adopted from tests in `tests/assembler.rs` +// +// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or +// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +extern crate solana_rbpf; +use solana_rbpf::{ + assembler::assemble, + program::{BuiltinProgram, FunctionRegistry}, + static_analysis::Analysis, + vm::{Config, TestContextObject}, +}; +use std::sync::Arc; + +// Using a macro to keep actual line numbers in failure output +macro_rules! disasm { + ($src:expr) => {{ + let src = $src; + let loader = BuiltinProgram::new_loader( + Config { + enable_symbol_and_section_labels: true, + ..Config::default() + }, + FunctionRegistry::default(), + ); + let executable = assemble::<TestContextObject>(src, Arc::new(loader)).unwrap(); + let analysis = Analysis::from_executable(&executable).unwrap(); + let mut reasm = Vec::new(); + analysis.disassemble(&mut reasm).unwrap(); + assert_eq!(src, String::from_utf8(reasm).unwrap()); + }}; +} + +#[test] +fn test_empty() { + disasm!(""); +} + +// Example for InstructionType::NoOperand. +#[test] +fn test_exit() { + disasm!("entrypoint:\n exit\n"); +} + +// Example for InstructionType::AluBinary. +#[test] +fn test_add64() { + disasm!("entrypoint:\n add64 r1, r3\n"); + disasm!("entrypoint:\n add64 r1, 5\n"); +} + +// Example for InstructionType::AluUnary. +#[test] +fn test_neg64() { + disasm!("entrypoint:\n neg64 r1\n"); +} + +// Example for InstructionType::LoadReg.
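+// The disassembler prints memory offsets as signed hex (e.g. [r2+0x5] or +// [r2-0x5]) and disasm! asserts an exact string round trip, so the sources +// in the following tests are written in that formatting.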
+#[test] +fn test_ldxw() { + disasm!("entrypoint:\n ldxw r1, [r2+0x5]\n"); + disasm!("entrypoint:\n ldxw r1, [r2-0x5]\n"); +} + +// Example for InstructionType::StoreImm. +#[test] +fn test_stw() { + disasm!("entrypoint:\n stw [r2+0x5], 7\n"); + disasm!("entrypoint:\n stw [r2-0x5], 7\n"); +} + +// Example for InstructionType::StoreReg. +#[test] +fn test_stxw() { + disasm!("entrypoint:\n stxw [r2+0x5], r8\n"); + disasm!("entrypoint:\n stxw [r2-0x5], r8\n"); +} + +// Example for InstructionType::JumpUnconditional. +#[test] +fn test_ja() { + disasm!( + "entrypoint: + ja lbb_1 +lbb_1: + exit +" + ); +} + +// Example for InstructionType::JumpConditional. +#[test] +fn test_jeq() { + disasm!( + "entrypoint: + jeq r1, 4, lbb_1 +lbb_1: + exit +" + ); + disasm!( + "entrypoint: + jeq r1, r3, lbb_1 +lbb_1: + exit +" + ); +} + +// Example for InstructionType::Call. +#[test] +fn test_call() { + disasm!( + "entrypoint: + call function_1 + +function_1: + exit +" + ); +} + +// Example for InstructionType::Endian. +#[test] +fn test_be32() { + disasm!("entrypoint:\n be32 r1\n"); +} + +// Example for InstructionType::LoadImm. +#[test] +fn test_lddw() { + disasm!("entrypoint:\n lddw r1, 0x1234abcd5678eeff\n"); + disasm!("entrypoint:\n lddw r1, 0xff11ee22dd33cc44\n"); +} + +// Example for InstructionType::LoadReg. +#[test] +fn test_ldxdw() { + disasm!("entrypoint:\n ldxdw r1, [r2+0x3]\n"); + disasm!("entrypoint:\n ldxdw r1, [r2-0x3]\n"); +} + +// Example for InstructionType::StoreImm. +#[test] +fn test_sth() { + disasm!("entrypoint:\n sth [r1+0x2], 3\n"); + disasm!("entrypoint:\n sth [r1-0x2], 3\n"); +} + +// Example for InstructionType::StoreReg. +#[test] +fn test_stxh() { + disasm!("entrypoint:\n stxh [r1+0x2], r3\n"); + disasm!("entrypoint:\n stxh [r1-0x2], r3\n"); +} + +// Test all supported AluBinary mnemonics. +#[test] +fn test_alu_binary() { + disasm!( + "entrypoint: + add64 r1, r2 + sub64 r1, r2 + mul64 r1, r2 + div64 r1, r2 + or64 r1, r2 + and64 r1, r2 + lsh64 r1, r2 + rsh64 r1, r2 + mod64 r1, r2 + xor64 r1, r2 + mov64 r1, r2 + arsh64 r1, r2 +" + ); + + disasm!( + "entrypoint: + add64 r1, 2 + sub64 r1, 2 + mul64 r1, 2 + div64 r1, 2 + or64 r1, 2 + and64 r1, 2 + lsh64 r1, 2 + rsh64 r1, 2 + mod64 r1, 2 + xor64 r1, 2 + mov64 r1, 2 + arsh64 r1, 2 +" + ); + + disasm!( + "entrypoint: + add32 r1, r2 + sub32 r1, r2 + mul32 r1, r2 + div32 r1, r2 + or32 r1, r2 + and32 r1, r2 + lsh32 r1, r2 + rsh32 r1, r2 + mod32 r1, r2 + xor32 r1, r2 + mov32 r1, r2 + arsh32 r1, r2 +" + ); + + disasm!( + "entrypoint: + add32 r1, 2 + sub32 r1, 2 + mul32 r1, 2 + div32 r1, 2 + or32 r1, 2 + and32 r1, 2 + lsh32 r1, 2 + rsh32 r1, 2 + mod32 r1, 2 + xor32 r1, 2 + mov32 r1, 2 + arsh32 r1, 2 +" + ); +} + +// Test all supported AluUnary mnemonics. +#[test] +fn test_alu_unary() { + disasm!( + "entrypoint: + neg64 r1 + neg32 r1 +" + ); +} + +// Test all supported LoadReg mnemonics. +#[test] +fn test_load_reg() { + disasm!( + r"entrypoint: + ldxw r1, [r2+0x3] + ldxh r1, [r2+0x3] + ldxb r1, [r2+0x3] + ldxdw r1, [r2+0x3] +" + ); +} + +// Test all supported StoreImm mnemonics. +#[test] +fn test_store_imm() { + disasm!( + "entrypoint: + stw [r1+0x2], 3 + sth [r1+0x2], 3 + stb [r1+0x2], 3 + stdw [r1+0x2], 3 +" + ); +} + +// Test all supported StoreReg mnemonics. +#[test] +fn test_store_reg() { + disasm!( + "entrypoint: + stxw [r1+0x2], r3 + stxh [r1+0x2], r3 + stxb [r1+0x2], r3 + stxdw [r1+0x2], r3 +" + ); +} + +// Test all supported JumpConditional mnemonics. 
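+// Branch targets round-trip as labels synthesized from the target +// instruction index (lbb_<index>): each of the eleven jumps below targets +// the exit at index 11, hence lbb_11.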
+#[test] +fn test_jump_conditional() { + disasm!( + "entrypoint: + jeq r1, r2, lbb_11 + jgt r1, r2, lbb_11 + jge r1, r2, lbb_11 + jlt r1, r2, lbb_11 + jle r1, r2, lbb_11 + jset r1, r2, lbb_11 + jne r1, r2, lbb_11 + jsgt r1, r2, lbb_11 + jsge r1, r2, lbb_11 + jslt r1, r2, lbb_11 + jsle r1, r2, lbb_11 +lbb_11: + exit +" + ); + + disasm!( + "entrypoint: + jeq r1, 2, lbb_11 + jgt r1, 2, lbb_11 + jge r1, 2, lbb_11 + jlt r1, 2, lbb_11 + jle r1, 2, lbb_11 + jset r1, 2, lbb_11 + jne r1, 2, lbb_11 + jsgt r1, 2, lbb_11 + jsge r1, 2, lbb_11 + jslt r1, 2, lbb_11 + jsle r1, 2, lbb_11 +lbb_11: + exit +" + ); +} + +// Test all supported Endian mnemonics. +#[test] +fn test_endian() { + disasm!( + "entrypoint: + be16 r1 + be32 r1 + be64 r1 + le16 r1 + le32 r1 + le64 r1 +" + ); +} + +#[test] +fn test_large_immediate() { + disasm!("entrypoint:\n add64 r1, -1\n"); + disasm!("entrypoint:\n add64 r1, -1\n"); +} diff --git a/rbpf/tests/elfs/bss_section.rs b/rbpf/tests/elfs/bss_section.rs new file mode 100644 index 00000000000000..3dc09dcf198e52 --- /dev/null +++ b/rbpf/tests/elfs/bss_section.rs @@ -0,0 +1,7 @@ +static mut VAL: u64 = 0; + +#[no_mangle] +pub fn entrypoint() -> u64 { + unsafe { core::ptr::write_volatile(&mut VAL, 42); } + return 0; +} diff --git a/rbpf/tests/elfs/bss_section.so b/rbpf/tests/elfs/bss_section.so new file mode 100755 index 0000000000000000000000000000000000000000..48c14f7c1099d76eb5d8d4197248280483bbb388 GIT binary patch literal 5424 zcmeHLO>0v@6rCj1TI(XMAYF(&mkJIqG$D-}TWrKaEOjH+O~`wFwLz1VmnSs1@K^Xh zx^wGK$zO2mCf+l1&m)9x#HDj#-nnz{x$|+D1ZKW+Zci;o!bM10tL59Rn(>7y>zI*iMCfY;Js z#5=wlzCnSYKu{nk5EKXs1OdoQ&;mC+Re^^W<6j@r#X3|HE;kv6SC^ zchGCJFVbe(8lN!#Hg$9 z8M7`9YHJz*AN71sy4E<)H|uqHcdl_p@eT!>OG7>1u{b~K5obNmGZ1sLp69x*KO~-O zp*VAUV@EKOwjqu6kr2R%w4Uqa{y0D5PlSsY^E`jC){`$3XRCcHQOw7EdY1IHp1*g- z!G#gq;&bgTp`X`Uf2Do=&Y>aar+zOa# u64 { + unsafe { core::ptr::write_volatile(&mut VAL, 0); } + return 0; +} diff --git a/rbpf/tests/elfs/data_section.so b/rbpf/tests/elfs/data_section.so new file mode 100755 index 0000000000000000000000000000000000000000..8c85d2403758761667d493358dc4a8e16d4b5710 GIT binary patch literal 5432 zcmeHL%Wl&^6rCi6QpzF}2~|iSvjhoar%f8MN<|9-nF_H$JQk5LE{z}#a*ULS1z*8` zbk|?tGx7^;*@Sy$?lD!>4Pwb$X?*V7Id>i>Gm&P#?I(|#jfSDM%xYajbf68h4d(;` z1JgGx`L3Ha1I`6BwefMr-hh$Dh~%N|qLhW^EyPER_)2^jk$FvtAurNA^viL8$5LR# zORnm_UIDLwSHLUa74Qmp1-t@Y0k42pz$@St_^%b%luL7ji?bNu1Q%?vueTkZI56K8 zZz--QUR4Yv8TtB5!0WoXrtNxNH@}w42}0n)aF>TIq#(8~1-9wHy)B5}*8VSR{EFgr z#lHnNS2qiM@s40EFRF6>c`_<0M(n)!j&N6_$*B04mk)v@*$d)s7=_z4{agoQteBt6 zvyC?;&=3{y8-T-cUVzwa+Ib;Pdo0d|WjcO6@36{el?mU^i*!87tTUaD9ag0Y%ls1> zC6HFB3A4#~EK4<^kgD{Z;L)@yo#)ViIGpBLHJTJ*FF(w>z3uE?_TftyncR0fcaGw0EAOQJv=e7>x7*vdO*?!7+PefH!b*YYn2O;EUh(3jOoH(IR=8pNop zs~NK@4su%>03Z2$S31@>_c`;md%M4}SM|8m`QrH6xI&~}(bvf4K@S?vbzBw)z=TkbRexURFiM?im z(I`v&`ms~?QylU1RoKHyHMzbf*Ryh&hU4~~1o@G2U<=V>8lRJtRKqFeV-x;3sYex8 zQMw<}B%xAR@4!+NL@C7OLA)XTh02E~?#H#1mj3MLdJ)BP(*OO~jr-vrJcy@x&NQ#r zkZJ}LuG~bS^xw%IZeKu*F^$hj9;d_D@$d+r$B{WaMVCU9r=@+t)jGqJvk?&ez=O=I>Th6|*+jJXx3PRKro);V%TMccV>ylQ z#*@K@s0I;J=L31#3}q# zI!yOmtHW0!*JGLwbxKNGN9a$MtMM4ggYqGT*ee;o&HcK%dHTik^qA9Ny~cXyG}=4l zCw$Z0RKMt92iG^;wO&^c1;n>`gHEaunNRDeTyq`aYhfPY%t!fj$n~JT`h(}6e;%nI z=pLwZeUOh&vrgeUptB%40ysoregh+TqKXa_R)_gAZc$#8tg)QNYvXRR_bEk;r9G0L z9M8B;G@l{9Js|9PNMpbLA%xLUnn%$wnm5DUC=K=t>A}=1+Bs^GCfmPa)i3otkaYz4 zehUaN1ooGrV>GWZ{rv>qT%*gY=uQe7%MTNu#r+yE+tYY2wf7{5rShO?;zc#HXp&h0qBst*XBu}?KZ*TlJO5Z8 z%^LAR{?}uFJj3O=7iFF;WJf-mlxK?gNIx{LA^l5#nEllc@+R%oNouA#NyAGu-g`v% 
zcS@0cYpx>xGVYs9*T?I(*xmZFTQy;H^n@;7Uv>+p$sX6af1;yvI=`_j!}+HSMQ)G+vKn4ePrKzcCv zPOqYVZY(D~O|PPYjpfwu)2r6&3LDF*-=kF-PE)@}tGJ)pUl9Me`A$uyWF9=Ul=vm> z3@Skcdq4CflIQAc58B^OZ z{~FSBV+rXk8ru%2Qss}+07=TX9`K}m9|8HAw0x;yTwpLC_==oJkMgYnOyrdL+vNEj z+XtY5|CIRY(oZ;E@DYDKdYbyZv5wmb`ap`9j?@kP!{g%^jSCriHy$ZJIwh|ya?X6RPh`nwsV|}q5N~s-s-k_gkvCdF4DE~U>GZ`NkV##D0XH+r`>TB`4 zF>U6XXA^lt**vJZ>%Wt==q_2D?u z{#d~EqaL{a--W$s;}Pn=WIvyV`q7Wrme5y|^@;V~26<9nlkK!1>N{(sgx?qGivdiQ{@=r^hNtqD9n58(Z2x|`=^d=BtNpa%Kka{$`V$UMM#gqlwG ztU)r>*8dlO0>(Geqg)^VCs=P7Ps3>{J}Gsh4rL!M^)&c=0Ml4r&JpCCK-SS_;+)`_ z7XDZY%h7p3l0W7M|CIVQO(*rvw~y@9D{0>&E%pCc&(p^Bq~FFB&I_cSa;}8+?%SkA z(|_b#N#u;}!E&+(NPj4?uE=>-(vG9uM>~En!58~wBb1eKgCQj2wD?D|em5PG3F-a_ z+AHPcTten`R3rVRyWgw}L^2-O-xt2PFF?x3C%%2#C`24JhxP&OWb;wNhr55S^%~V^ zy-#;@|1n<~7wD&9d0bzyJp&)Yzm&!>x{l@Jb4@IV&-kOuby(zf?%?vn-8X1_1d1Ui z87I;o>E5)?4|}_{yr`G2g508?w&Uo4E*8s)^frzo#)lQIeo!A&iuKBRhjf3Q;E(%Y ze4=UA3%4t!{)g6=DKF{;AMsCRpCEpCa@^o~r0f%#$Eh6dGtn;)Ib~f#Jz7A5@_iKg zB*|wE=r8jlK1lqKJ_`*TR^(?!N7=vT`4jtjY!_nxj_p|Ne>~0dfgI@HqTauhm>1aY zERf*&{gF*Xi+}rJyv!xiC=aR-Ltt~s9N%KcMt~**YoMmf(xj2 zK&Z<@xF7SuctN236i^-5&Gnd6Q3(`)A)iY=v=nne?m!1xKZ`*>^K^vwt)j5b^Iq2Z z#?8bpr3N&C$a{HvNFI^*E|7B$O4J`=Jwh6Xa7xhzu00<*KaqUYi*7%Jo#^kMlWQr| zk9vgv6v}tOkPn8Kibh-YxAFjh@Z3HP0}dY1qs;51fA$BspNh+CawX?}$@c@l#PM0D zd>pT9q2j)Mlx{pm^~?N_{Xz;xHBtzU^YX6yuk{go>A7wHwy_EZDLjaO@$;20VEik8 zJn_p0_p6j^(lA%i%^4(ta~vKwGn66iLq4|*X%hVh?F5d`--3Urr;}HceH&Ky==|sa zl}~r^`vX3o#rZsSyC%_pCe%-$_ejVeVI(_WL=;?!$Pt!x)w;FTs-0dbEMh9qI z$a_GXpLmqqxRv(V*k31Z)1>;3oHNOKkt~OtiJ#Twa2~*au%28bKDY9CKT5Pcka-ryCywv$B>cZd z+kU2bgY|7|k>f;M9}F3gi`RQTFHP$EoSvtAK9-P!=I?wv&nCyK$j{>^$&dUQn)gZk z1kO8}u6kX2iQ5UmrN*r^J{rppkzVfRalP?K+z!-``03c*(fvvQAn9g8(?c$^5|lej2i{v&5d zUyo4#MSh0xjnv=6y>HOz$Yp!Kc$CuY|3&?1UFP3wjKJcJ>xGyR19IYB$jz;QSBjq)M)@bD#&Y!^0}n1U=9`=ouY+f{1UuI;-1 znD0{3W59O5%WwY{Xv4eI4C9E07~S5Al5-#G#pj*?%oTC zq`T+$xeD%t$oWRllhXyne@A~DIX6G9VK_I%*$?#sh;K}#TuNIj!Uqy~%#X{X9+6*k zzjoiDJlO|%FPW@YkK+#kF+Az6RXW6aGZ1TBBmGYEWZ-`G?`=({nmaj6*w+atn}R!q z_%!dQe9@oy)$zTIqr_MC2`N4o=(DICuFLRWd|oo#O?I$v7v)FKaCw4NliLv;A-Pk$ z@1ExQIRcHvarQM1Wj=+KOpUh zo~HSW>&*e6z;y#z3@f6K&;72ZejXU33gp}l_g|QkMg_o=Qna3|9MefP-HWY&{^9pj zjk{?b3VUr`Pva??r|I4;nhyIDA6dtnhbf)XyG0l$ub@36P;|aMr1HZe|;<|_OVF<$l>=5-+^g-eu@wmlv50q?v9sR!MlY&zkfK|Hp z*LXkw4V@M}N%?kq8dm>>=#SGlmi4aQ&+R-){afxQy`UcONvZ#=%Sr#vg4ogy3}?Ar zGCpvmT@(%iq8Mtz7qmG%wvKiyJaN*&ed=DTz?O5US2Z{ajGLp9m1;<}oFtXU4p zUS6l7k0J{5=70{A`1SGp0(yL8eUkfQVt?hHs?4|OqpUySSEKUs-ck6b)XzwMmNUYR zfG3i2Y+vttkjkeQWpzaE^T_+Xbc*az^c>koIp2}{V|}Zsyqqt}d`T~&^J}?p8<&rW zenr98k>)2TE&Vjj4Q*~G9Bmg;evQKr)+yb8iE=QdmC)mP`#DGtq-eY+`GYH`?@e6L z`B)0dCHUaFD(mpdCv>*zWBm?P$S$t%R_TmsKIf8q*5*1D!o4As6W8x^P-1}G2NfM5 zKGCzJPkHY(km=EUWj$NL_g+){ekwYU(fQG%JkRf-j+OPGS<%_5Zw-aQXZj9GoARNa z=0Lvjtuz2-{K$Q%W`@c&u3>*x?v=ZYznA16mV5RGs6g}qmTwKoC-*L7ey0>opy+X$ zzv8z<&$3>ApZE?0pg4F;JA=b`C*Uash7lf_C+WqcKa*bGyS4XE$nUv*>N?GDC9#R` ziBBKE(Wg|{b%VCMjYr5{rMqZ-pI@g5Z$7NaK|X5#mHn6>8h#Aj_mBJA(5IZoNq@pVQ|3ePVM{kGIzas^>vME4qj9mLNRQ)Q z`a{-@Xc^JPc97@S{JKN^pHlxA+hLSL`ZL{y>onL?UdLrWE&R-JCVClG-ywcv4pj6U z_tUf%Tdf$OaTN2tf}Em{G|MUN6+cSOT}2-QOTQks2Ww2J|ESY)uNw7^Lv9EB!S_V+ z9(g6(=YcgJ)`H4C$CX=kQtx|l1`Rww4s?UhL*#pd9boK`uh?bTN36I3QPAu4I#4Tl z9Ed!!fA0Gz<;%Dj?!8)%i|7dX3w>lZ;G83^3FSWF4j5#*9WYPv>C5OKmLtXY@u#N< zAAta8*w;A3_lL+@&L2ZaH9;?!#wXVQ;=d)mk{JQL()-1}64OC|eDGWxieDPO3Je}R zvk>4q(6?D9RX^4T52i3osk?N>G^t77H`ek&8be&qkT~r5pEPfIT&3W&1q-HAH({Wn zcj-ViM<|?y@<{p|{6Tqe-eDE@vHpvX5nbQS6h`l)hROTBxIE}rmxoZ7=k=-h0WM$S z^6#P!c=h#@{d%?am#gx@tgYYCwZG&DO|~PJD|U@pPH->ygz&i z{)Z2SMb_Ia=m9lVWIag#<99=JvzEdQtQVw5OV$S=KIyAzV8!dmRkvwe>;d|9D2LcP 
zwD)t69@w;6)8V|rbV{w#X<0X$>{rSCg7hLfJeBgzw`p?V0}SE8@jVBj*t;D7{}OSQ zQ|eE5eM>7r{Pq#Z*Lq?@iz>|Wn959UA*^Q?ZN?mz<;`xV_z|(FWO?OlK zQ*e6_=|q2WZyo&&e0G4okQMqP7~;8da{fiwIq)>0|M0T$mdBJdPx zn5(|+l*ajvxpwT+Lo$^=iqEPg*1M>0BsRv;N zvSgmG1La_PN-9_mUD2EzB`*-gUVfeRil;UHLQMh|#wT_IyU!>mw z|L_ez&aakokj7_hmk!kKp0zrH{ebU<*QUOA>C(BMrZBw@a@KZTqskDXUhu?iAdTE{ zoYKAl>9D_>pVqKKd!psv%4r}+eb-rkDMg33qR$;buk`~V>O=MiSU09uQgsUFH%4*3 z>2^W9rCmz^fCuG)*Wj0u?V*w&Ssh_11m&NoG$Vt?`eaqke(;9nV+=x6^1yjS}eXEkXU zsNgyR(7r$w`u)_?pcj))&dHN>znh?oR%JA!=%)1)@_qr|+ooA&%)osNs4RXzFZDz` zPv!np><_XX6#p-#U&Hm>ltH#i%E$M~Hfvb7=kqXNrJTGkFwddozB=k@84Qfe=vhzy zT)AtL<*ujtWj{F!;|-dwq@H!4?4N=D`nY{-yZZHwLhNUJa2~-_Rs(rHzec6O)7+&4 z)yLyl*7Lp_2#?Tk!PA(c5XTep!TE-)2OcDU{Czf}zz-)6Jo3Gl_@#Ym!l#ERtn>PZ z{E*N1GV&Wmo>*T2(W4%rd6!a(zB}45IyoM0sEh}@gWBZ78)heKyElxC&1|So?g=J> z4fV?I4O6wrkyxLvdRQh|DLf@EvQ8sG___~3ucQChh^Yb4yJQ74ZO6{qIXiE=wrBfx!Lc0Mah$A^bMlVsc#iKBvR2m4I@xSCm(6G0te5q( zg`AbMb51Us%jNPpH|OR2Tp@4e?YxuE=5zUc-pzY?KVNVy*LEE@>*n0N>$;xny9Lkk zY|rtsUe3#VuIG8aSMV+0_8mX#=ls0y`kwFmg#w6Jfa(jtx&Xxr0O^jS{=mPPx6a2U zeH%jo3@W;@3w{%f9U7hUA(HLlIe3Q0{sj*I5cqPI=7#rG-@tsY1^dHON1bMR%)&7o zKjg}(3s8={WTo))#S9Z9?I}X*hg!><>AaEeLK^43JWFV4)5)Y9{fuLM86k$S0O_TP zv?T;&YY)eY+ZQpA~qK@z+C`=w?Qhx~UDmz74L* zd+Rfk<3&AP)x0@UV6cjNCP#`YC{5O8bP^0xc`7REx-u-MjNefs?X$up|BR8|4*Ldt zB>%jTzQ78W{9bOXr1u+Xp9g^CZ)dwMX^T5Z(#MVTStG3v{}b}<;|U|>_hSbn@-Gv@1ned|{ep`>>i={X~v$+XfRG}0H0bnmKG`h7BDbn&A(`*53+$3dHQc}rO#|?P0P1yLNDKD zNqUz-KWe0>jPyPuE#C+TzavKeDI+~+q|X}Z^G5ojk(RrbQeXe(*7nLBHpw3{^5t%s zA^QX@o;&dh<57uuP=^-P%%SewJ=_w<<&qyCP(x;5{C@)abp8ZDp-0ND~ zbJ0lmvtKOb*Bj~WMtX|vnb4mx(iYn*$=|-JwSMa+BfYvcz2vR)X*B)!j7{tg!MVNX zo;xzOJ7^8&DZkxc97Br~eFK!F8Gr= zT@rKpPmG^4=)Vbg7tBzxk31~ZcQxL9nJ@bzSm(OCyOhQ9pWzyQ$+$Rxy{x|JaqF{)p6z zemQF5w1K~u@xumAKMMnT5XX@j{VS$lV&IQ4K4jpB7|$5^Q;hF4@Mi$Wc8xNAMAIqt zW2Qf3(EptAb8K&=z3`fs+H26imhrO&em&#*5L1{yxRUXm2LD?aA2skS<8uanJL9Je z{4T~X82J5+pO*!RWqJ?e+YP+T_!3@6qzQW%UvJ=h89!yJ2JSGvuTMBr zcpKno_rwpBb>tq6&)YrOx7XM%_42sNaQG0=FJylnX@ehbgTL4Y|4tkH7j5w6VDA=^ z$8Ce(-v*y(gMXwA{w&~Vm&DH#yZu*y;~RYW&4xklhVSrt;WMGor@_vn{Xb#g-(_6< zOzE!&V4sBa;(rOB#~EL5@Ogo8**{t=&&zGvwF>P1Li*g?1}_4RdYI#S#SDFl@zVzW zea6o)o?&@z1N)17eD=dm>v;v|k&Itp`YERWGT=C#w{0T8V*KxzUe1(`GmgLMhb89Z z;DqtF1CI6f@&U?zUSB@V^u8RdF#Ts3-^Kna!|Dr+PZ{)o#`t~%{|}6xG4Q`&{G5Tm z1USm6c%LtR-!)p{U24dXhpuCTJ`Ma0Oh0AdYXL|82N@Utfj-NI@oCUM!hFuMUoLw1 zAmir^`j0Sv(ZHW%e4D}NcNpJk;Lk9A%D_Lv_z4650^{cmyvg`s)~Cq-HOBk-d_?eX zFg|C{zr^^ILH}PFzi80Y`VD$E=zk3KIG*?Mc;?}*ZiMv}?FpWJCZDYuzbgJVU+lxA zUXQ!fSpm4vEaQiHUnIQV&-gZj{vhMM27ZX~jDeGX(4{UK_)(@GGVtexKc6FsJfCM= z&YuMTQ^wC3^j`%W^&{VGXSu)yjb9aivz=l5M({IG&vUG25$jHkcPl(|#z)M=l=e%& z?*)6DeD*PZ$iVkAKF4-U${b|;tU-T>@hLgj<^qQq-(}DrW&EN+|2*TT4E&D(PmUkS z{x;)yHja{4kH_gTzjKo7WwS>DM#f1Af&) z>&t4uwSD7rLE--|wI2rdnQ>;LLZ+89cRk%8^D}^BdwYk3lERmmUe4lu&Rzk28`dk| zz{xr?0yvJ7vph}&e^TS!@wdt1_BC|`h$LO-eTZuRydr5_$X>US8wm+{k-t-c00a@x0s0S?YEe*WfG{I3~5ySf$s9^?H6 zpTA{%YF#V+PZ>X9(7(+11q1KZ{xtaWYg_p&WqiuO-@y2sf#1xyGTLP^zQn*cGQP{e z?_j*wXxBT0-oSS z65|&Q{^uA!Y|#He=necQj32UE`M=Egb_4I#4hF272ELT>ZB8qn8yP=t(642D&cHV? 
zuIyGm1;)1-_!hQ$(#*Z8L`x)P9;2&muzkxr= z_z?sDcZ{Dk@aGuMh{MQgdO`Rb_@4+L1OF=H`(%M-KHp&cRIXM3XBk)dR{Xn+A2H~E zAp8ybp9sByzs$IA;5|#RzAm-Xz^`Nctk){fje;BWYZ<@bw$g84e1D-8FED=2(9agZ z4f=Z-=g;wCW;+-^Xz&RbKV{&%g^$61FW|T?oM&9l8J=Z&^_I9w9iiV#0l#ZwD}IXU zcN+9BGTv+8f6jQuz`x1(1q1&!<3qQ#*84Yrqkk)R@8CE^Ki4fIeEkrm;iN!6w=A*$>BaJ$&AGBj^+S|MMB0R8thF&ob`c!~j?P z8rXm!z5MouEaww#@NY6Mzbzy5lf3WPxr+Fo<7%E~+-LuA+ak*NOUBjhlpbRGHM~z5 zW&1DhJyw99jdJd1zi9npD)27G*K`8t= z_&vSn*VSEYeAMXP$B_Yna^)~oe8~mefaJ)CZ5dR;v!PBsRUWk6E4Zgb#{_!^W zi*4{9w!v@cUs#^aZSe6n_~ADAm)qcf-v)pE>lc>ijyCvY8~mv@_$P6{-MuLO|3#A7 z`cFPYG4AtxyT}E84D)e`6i$S11v|MA{##J*Lbw{73HQyY!P;acmk>ymQTiYxX+UnxL+O@?9K$Sq3(w<3aK3{Df41;pPtypk*wh)xV>U_KN zTK68`QMT>s*u+?Uv^Y|moP`Fu<#IK!gS_LoUbXDF)WCd}M71G8ZG{Lm6Cwqa7qe_T zj_EdNjFf`8Sya1Gec_$K?7PA;_QZp0wyfENy0+a~SPyFthefz6U7Xrm9|ggx)spLl zg;L<;>}tg=uwVrdtWdOBvO+OWViZKyf`|x2EF#2MFo?zUw(bc#518&D$9IcaWQGyy zNQh{1M0AB1p)SLi)`f<2MLI@Zk7O|m(1mc-YSeUbEIZF77Q@jR)pT*L6Z3Q8I-Iyf zHZGBk>9R3hcD^>hb!d6{mb7iV1qByS>!X8S&Z^pQuiFpZYM8S;9#o}0!R|0S=!BJA z$*p9wdCRGWS)X%xT2^b5kAxG&pfoneARmd9o4?x^5-R!?pAv8w6GuNa6Zu;0#vs z1*e>Iv!!x1pUYPB%oy(14pzz~H(w4*ZcqyIWhdsU+w6v6;1ujE+*kJDg};H_Cn5r8IY~dRI>G`o&`(g@f}|n;C`i= z_i|3jwgNANQd^-Hf{DqA!II^doQjooO0JVDh1IO*1_d|o6e@7r-t&UNyj4*8@-S!@ zE((Jh!m4HG3SQ{ubDkwOZ#1ZnmSC1o41!37yr1)YD{$d{evW0+hG8)4fnh7ajr&U0 z@t_~(1<#>r%udbJ9Jf-NoGOk@6u~@}L!7*O>tm0Edh&Xsg}@JC4h41@rfb=n7L3M%o+{=(9lu=2m-B&bRdc!?$Kqwd;#C9yS{^K39V}iFfPm~VM9BC?TE1z2+&jmu=5D2M75D@|`IA#by3os&jp~8q( z7(`%+>$FW-T%$T`J}h({lj2I)8CNZ{&z%dQ?otT#`U;`s>yCvqH5(&ZRS@aCu3X1b zWj&>BDNMQwIDcT@;7cywtC%)?a;olE_fC}Y(>C|ciTftXVWAA>zc>jon7HssSEb~Z ztCgx(actYuV;y9Fm4@8ShOjV$IfLcOD(AqEvzp=KGOLu!fguW=AXo6qPUr_!m}ixI zHS{a_L2Zk&t~U7#clMiCl6MT% z>v4<%X`A_~%i*$D z$a&e4WqGjvzLLI$qwJ1Hupdv~D~|RoJM?*%^zT4~_81VMMFS$_;6sFL3KG%sfe8wduS>`!u9db%FtT`!f(V!NL|CslPsFtY#2g?->mL*p z{t%<#rF}!_q{5x_aco$#ovEM#9_CC4AL{P09deweYGHM3AI$JvCCFz3uaftw*)Z#p z?}EbRxM2{Rub3MK)y<2=N`dCg$7ngmCdN$@m3c+plwlpLMxcvpbGQoti|fe767Xg# zkIohD`yj%3z9^5^PzUy&QoAgth%tMBxN=!HG1KNIK#VKbi>a=uATh8XJx zVlflmBm(1XT$!{5mi$EF@bS6eL zUySHVHBk(aP7u?w=}fI(9n&>oj9RT@F&%A<5u@$0j>Tn3V~COeqGPd0pdDOSWbkGu z_Ks)aV{Q)ya&fPlgJD|9mOKmWaHU!(1Qzaupif+z%_~N9lQE(t!iW|ZBf6m&(OrZQ zJQ33F$rUY$4~C#qS_k@~W7>S@GB|GR&s|Yo$92JT8J~iW>}$o+-myLKeSCcoe3()Q zTh)A6_WW|5_uh08!mbv3TI_(a(`|t^JLG1ARqB{D-zlkOWjiIc!E$se08F?}Nui%Q zCG|SifMsI$xx#YZsj__Zusa&3w%>Ud6zq<<7MxB=oz8p9 z>AVXZSYkTVfCGzX$D|!k7zdWDPPu}0@0hf60bujlG1rdAs{`v`hg{*g3?ycKmIWw?VYc+2Q4{e&DxCs+F=^vcrm>E!l9G1&hC(y}@ID_P)mKPRjNRvp$V`dhoep@fUkP? z?wPT((5&>FNcm_tq|p9 zURjHao zvj}w@=Bj}kz^OExwdPB(mlxUQd))SEg3(VA!C`w?-VH_J6ejOE<-p6qSvH(2%@@UH z<@Lr6$SST?_S}4>P^dUo;J}U!MN7_NpdK8g_XBNS*BTrATnPqeF3h{|MiY)glJ6ma z*Zf{H3tLT)0N!@Mj?ykwoqUijlt?+r5ohQ0B+p&Zuhd0hl3eXs^W7{U)H*%hl|Q~QvT)bDEx2Qw9!R-K>#r_3(A@Truu$@dfO zr=?$;p}7wXO7IIxHaKfe;Ms66o|tZ^^7)1vn{4PS&(D|2p`VAJfy#wdI1-U=21#rA zGW^KYjGcqyejU^AQ|j0@@X$+zyko;JF~O@R7#(Pvycf9)@o9=zvmc$i<|Dz~g zW59oW{VDz(f(+kp(Eq2vi5hwnag6rBKO~iVWb#{464LkDNJ)=b9|oU0S%<@cf_l<)baf2DqD ze+9}SPYeDfAOCJj>Fa8V45?qjamW+d6J%;TmmkUydMyMGKq0AL+P@cY(LGVp-$U=E d@bM%Faj93}_d{f%@;B@zieq@cC-Io&|2ONnNZ0@X literal 0 HcmV?d00001 diff --git a/rbpf/tests/elfs/program_headers_overflow.ld b/rbpf/tests/elfs/program_headers_overflow.ld new file mode 100644 index 00000000000000..ee60e1d4c7cbc2 --- /dev/null +++ b/rbpf/tests/elfs/program_headers_overflow.ld @@ -0,0 +1,24 @@ +PHDRS +{ + text PT_LOAD ; + rodata PT_LOAD ; + data PT_LOAD ; + dynamic PT_DYNAMIC ; +} + +SECTIONS +{ + . 
= SIZEOF_HEADERS; + .text 0xFFFFFFFFFFFFFFF0 : { *(.text*) } :text + .rodata : { *(.rodata*) } :rodata + .data.rel.ro : { *(.data.rel.ro*) } :rodata + .dynamic : { *(.dynamic) } :dynamic + .dynsym : { *(.dynsym) } :data + .dynstr : { *(.dynstr) } :data + .rel.dyn : { *(.rel.dyn) } :data + /DISCARD/ : { + *(.eh_frame*) + *(.gnu.hash*) + *(.hash*) + } +} \ No newline at end of file diff --git a/rbpf/tests/elfs/program_headers_overflow.so b/rbpf/tests/elfs/program_headers_overflow.so new file mode 100755 index 0000000000000000000000000000000000000000..7b3469358499e360600e9c44725a5c90bddbacb2 GIT binary patch literal 5408 zcmeHLPixdr5TE^{-CDs~LH49MieT7aw_SQ_Ybyw8@gSmJ)@%~(Qa6!ogzm{t;dk)p z$JmckZ;J@)%zN`oYbb(v65hbeZ)V>7-n>lm4sSjmhEJP~20>c1VyyF}m0g{9yI~cN zVV4Hf=I0u%k}$9FIKRaYLFEBG<0S&px^)Pz&0}3{1x!=NhE<3O@a9UGYaH;o+CcD{ z?7z;VtGuqp0l%pYM1FNUa20SBa20SBa20SBa20SBa20SBa20SBa25EkD$wN%v|fSD zzGrHD(YJW0cu!I8=pwf*P)9D{EZPpJ*1BWemHlJ6%*CbQ=I$*v?&)h{u;BhJJp_@3 z1-hYlQ}M3i--6`d;9b=F5tL?SG5eU0v(hYydEEQMadw&(J6;$bctPLq`I~>vEP2}W zeOZsX&yEH)tGZzQR}$E4Rqqep5i3{j*DuqvlKdi1qB0^snPt&r92;din^-Ih4oE-9 zB>8bZnea=J&(mczVmz6ah1D#QU2Hf_<8qv5el+MO@w=_9NAdelevgK4j)TqLL;5fG z!r^|feeyWoP6wkwJWA3eNYmi(ay^F!uaEbRo;|@k{R>K6Wk~Nv1m6k$CeL^;9;h!o zE361#`adH0_tAH%$JcFXYf0{wkXi7*qK|}r5&mEBkw@a?Uq}AAEW(2RRnMBHhbH?3 zA9+0XNpA2Ltcxr(0nUf@UYs}B4_H~{0Y1)eM-MdiiF1zp@CVG5ePN%h8tEz@=N^8E z6#?$Bj@)KKA3f!tsV(PS_JI6&|9$qM_8My7tMXr5cCwDVaEGj1#s8`N2!V=q{x7k9 Bo#X%j literal 0 HcmV?d00001 diff --git a/rbpf/tests/elfs/relative_call.rs b/rbpf/tests/elfs/relative_call.rs new file mode 100644 index 00000000000000..997e5d42403fe6 --- /dev/null +++ b/rbpf/tests/elfs/relative_call.rs @@ -0,0 +1,20 @@ +#[inline(never)] +fn function_stack_ref(stack: &mut [u64]) -> u64 { + stack[0] += 1; + return stack[0]; +} + +#[no_mangle] +pub fn entrypoint(x: &u8) -> u64 { + let stack = core::mem::MaybeUninit::<[u64; 32]>::uninit(); + let mut stack = unsafe { stack.assume_init() }; + stack[0] = *x as u64; + let y = function_stack_ref(&mut stack); + let z = *x as u64; + return function_sum(y, z); +} + +#[inline(never)] +fn function_sum(x: u64, y: u64) -> u64 { + return x + y; +} diff --git a/rbpf/tests/elfs/relative_call.so b/rbpf/tests/elfs/relative_call.so new file mode 100755 index 0000000000000000000000000000000000000000..9f24730e209597b7bb901191dc47dca8bd3a6887 GIT binary patch literal 5384 zcmeHLzi-n}5PnSx=9fe%5@JG@m>96^rb!HRD560^3Kb(|i5$nFp|KMj2Q})z#EP;o zFv35Ql|O@jfrT~P_ugFzrbtMPypvwOdw2KUJzx9=-`iIEab;~yiBy%NcP<%V-K7Qj zvT4vI*j1sb((gs(=$LbGZQ#dCfbD?ofbD?ofbD?ofbD?ofbD?ofbGD4-GSMLd3=We-_a-fBV4Hu z+!g0CDa+;Z^$lYxWBN#2EP{8zcY*l!uL znGKMRZ+$JqMQR)ea?^m)vj@_B_FrQA6-GWb@BSW8Ym+LvgwUVU(voCFJ{yk)S)tm4 z>?q0ayY2R#8`M4D+m@i;5job?H{H8cFP^ueE0{ViI}g}?ObD(#???D6mD70#wvhL< z&OEPIB*%sFdb2D_2eDQrv(#XbD=!|UDLs?&$XY~Q!ox|C8_l4~lVMa0-Xxtk8V`_n8g2BWMqDWdqOlPCS4c^HKCFziKJ{d%)etA$DH zEb7)z>NF)%t-0Os{a)nvlX@cxqLy0ee`oJt|KZaok8}h6@drJ|I(^@GFLS4PDSvsc zg!=<-mE#bPd@`pRvt7@;NEbf8{<;j_ZQKe`Paq3sLw{a;2qMX_G;)e=Fi!PbK}mI&mWLYddHy}zB0f3 VCre$4eeehVZJt|xmO#cb{|AeCfu;Ze literal 0 HcmV?d00001 diff --git a/rbpf/tests/elfs/reloc_64_64.rs b/rbpf/tests/elfs/reloc_64_64.rs new file mode 100644 index 00000000000000..ab2099892e7755 --- /dev/null +++ b/rbpf/tests/elfs/reloc_64_64.rs @@ -0,0 +1,4 @@ +#[no_mangle] +pub fn entrypoint() -> u64 { + return entrypoint as u64; +} diff --git a/rbpf/tests/elfs/reloc_64_64.so b/rbpf/tests/elfs/reloc_64_64.so new file mode 100755 index 0000000000000000000000000000000000000000..95e03f5468190a7c219a10d1b0cc34b0884896a9 GIT binary patch literal 5248 
zcmeHLyG|QH6usEILQ?RMB8n*54yDLwjZBEBK$u98HW6(q9*z|(u#MImDX!or==~8C zd_+EkmMZSTJu~-gtQ8GPJ6E1LckW{zXUE0dtAp%QZDvL&by+A!uAMbUDuNf%bYr}j?+90n2$ZNyrLf9Mne{}&r==6(xbUEnu3qPbE zk!V@Bf8NP=k}PW_jV+hDW*PRRf}B}-_*->uz)-9Cx<}YWhQ}1l%N3npg}Cpd z{z*Uo z_K%+~m8s(&wZ47b`|^1|Z0tXK@`suX#h-hsK0`aOT8~*!8*=M#s+9E{%O87ds1-qF zKJOV2cMEkP2=5!~N@+0rS`+rgeBMJK)@DBMVpEBZXQIv{SU&b1i>Of3J{jMyX2$@O{u9kW|l3D=gNm6o7z UU7A~}@Y~S_{B8Nwb5-*n0eve>s{jB1 literal 0 HcmV?d00001 diff --git a/rbpf/tests/elfs/reloc_64_64_sbpfv1.so b/rbpf/tests/elfs/reloc_64_64_sbpfv1.so new file mode 100755 index 0000000000000000000000000000000000000000..a68fc3b742e8541bc79d04dea4f5c1872fd1df2d GIT binary patch literal 1440 zcmbtT%}&BV5FUOML=TFI;b2T}>cI^d5W)!~nwV7M)e}N(lqkiNn#jqg@XllT5MF!& zo$1ckkVX#bq}y+IX1<-7-PzYu_bi*q5F|&1c;%?7qXs7j526mWD98U26-nHdc)&l6 z6{P`b-718a=kcy_(s-pjAb45UFY{C<=^MDdoei@ywa z7(Do4+i~5lQ)`&2smE>$f5OSr=U3WcR}|T7a(}RI^W?|8Oys{rWNxQHe>C*s$z(dR z7=>g)|51!&yzywnMUY{0)E_Y3PomJyEE4;Uz1~rsKT~Vgs)I(odEnhWo9cY|z3%np z@x}QGQu`O;{BYkmS737f8YSJY1=b4}&u>w`qYuH?XBPLL^p>_!(r@Uys>T(*_C%gMjb%?a#JhqX@qveIKy3I3 qc2s$zO36`jKz!8SR=v6 u64 { + return "entrypoint".as_ptr() as u64; +} diff --git a/rbpf/tests/elfs/reloc_64_relative.so b/rbpf/tests/elfs/reloc_64_relative.so new file mode 100755 index 0000000000000000000000000000000000000000..0ebd7ccc662179f98896601362798131f9fa3809 GIT binary patch literal 5424 zcmeHL&59F25bpfwX59sK7bGVMo^o+WVzOrU6m=DZQ9KCZDRyQW4VsxHJ1(1(PvLub z@Zh85H9QGkVtv!~#Wm~;#*eC%iXqj^)_Mi)NDU`@UT6s$ znW3r6ykoWvFqdGuhRGA32HsR1IdQPnVOKxci3h(T14O>*w^YBSV1Oro1}7nM`c*}D zSw>s^+9Jb`{FBHa@`hiAIaDB2AXFe!AXFe!AXFe!AXFe!AXFe!AXMPLsK9601@7M8 zWZ*vhQ3l9m#e8v+PqV_WZ+ceV*&$xnbyJ++r|kh=x6QuV^}21ouU0u!z=CiW2l3v% zO_6- z_NN)Xb@O$#X}=|kim8@yf#JSh09dW9`w>`zVi|i{wO{g$3U^+ZXr8CGuqH|unVn6O zQaD}AJTK+~aPMF=QIgMQvPlz(xv&%AZ%&K3mpnIu&6Dv#e=LG6re`i1TIbxr+S|$N z_ff}0vCZ-7exU6-c4!)|ZVO}a_y28Y9*o%B;dUMT1I_~p}kPaZ!g_w%n4 zYJkD-2hYQrAKv_012$0kY%iAcRhbdHraJbI*jHlz zX#M>vj^_aJ@U1icQe?znKdU1$2>#GBf9|U#I>e?w&qqTKB+rLHO4`gH`EJXgKhJMd z{h23vb0Eg2zAHLBlX(IIYO6obz1RM%AM-d817i~dK2ra4RpLD$2gc|A?~9JLS7^z; bsQ>eeN|70tYMiIv`S0ov>=l8^H~oJA4PIr@ literal 0 HcmV?d00001 diff --git a/rbpf/tests/elfs/reloc_64_relative_data.c b/rbpf/tests/elfs/reloc_64_relative_data.c new file mode 100644 index 00000000000000..c493326f001a3c --- /dev/null +++ b/rbpf/tests/elfs/reloc_64_relative_data.c @@ -0,0 +1,13 @@ +/** + * @brief a program to test R_BPF_64_RELATIVE relocation handling + */ + +typedef unsigned long int uint64_t; +typedef unsigned char uint8_t; + +// this will store __FILE__ and generate a relocation for FILE to refer to it +volatile const uint64_t FILE = (uint64_t) __FILE__; + +extern uint64_t entrypoint(const uint8_t *input) { + return FILE; +} diff --git a/rbpf/tests/elfs/reloc_64_relative_data.so b/rbpf/tests/elfs/reloc_64_relative_data.so new file mode 100755 index 0000000000000000000000000000000000000000..fe2e0db33786bd587bb9e869050f45b78320a29d GIT binary patch literal 5784 zcmeHL&1w@-7`;uR_1{)OqKM2wK?H}Yt%w^Vwk3twg&T2|jxjVrlhkCC1O&VA6$F>r z_yXbsWbejD&_{6L4=x(dnfXpghZYwuy&v2;-`}}+?l*S^=6k(TSkt3w_G0A6YiK}&H;wzcu7UG5d-rHmB-jYjKFw|E1!57gCZ0!2OoK54LX7C<5Of;kF?^M*qN89!xqU})_G{noqR<8g-}yN%=@ zpLWssM`=KcDWp9I=o=P4Vt%cA#%#g&opk=b6yvT7H`_15M$~J7hg76FGi#kz3l(-D z(&Okcu+fXVsg|PK_XKC3Om+Fu`rZ3$_q4TtLe`Oky(&rae5U$c@)M_@FYQ?j=3l1) z2{34@Uv?Jyv?O!GAv|e1&j+Q1!C1-G&#srX&m))jE`Hhg>brf0P0{|ux9J_9Ki^rN zON!p(qP?J;k3v50OMJcGSK;H@;%^{{c^k&pdk$A3j7`fDj34->h_Cm)Z^Vn1ob)cC zYkkde2g+d#@rSnl(D;ERX8GDb_s5v{BjbE&Gr^pLkM5>&o4PqO&6(@vHG~Ogo`x`F{XJVte)g literal 0 HcmV?d00001 diff --git 
a/rbpf/tests/elfs/reloc_64_relative_data_sbpfv1.so b/rbpf/tests/elfs/reloc_64_relative_data_sbpfv1.so new file mode 100755 index 0000000000000000000000000000000000000000..256662fc298b8f99bf51e2140c03b93b888c8089 GIT binary patch literal 1920 zcmbtUJ#Q015S`co0t85cgaQS+0wjdQDFgzDh9HtCkxU92pmGk!!WOX&=L5wO5-BM1 z2k2>#Qu8Z7H2efgS_(cy5i`&C-pN@)#Y%f`=FQB`?(FQnTC6OUMn+7dY(EX!R}%9w zj7l(@w;NXW?{OO+_#SYR{6mZwCbWs1Kn^T9#?&F6&fQF?xRUb+eO_Q(h;PXY$GFZk zd8*dqKHiW>>l>X~_1e{{3rW^|QLnBiSrXUef0lyEFiCwI8}c!ggGI!XM5eLd{eB0O zwt+JY2IG%XohG8D7|XOD$yHO{Q`80Dck}hv=&yb5P?z5@s^-I-WbAXwcdPr1Xc%{# z^qc#|gQ&h%Z)a)mX{XuFY-y#kNcX{c;fi_2XUQhEBI;Y8cJIA=|N5n?f32syh{F-e zoRGnVqta|Ysi!xiN@XFsJQvTz*F%rJI@TDqZ~eWj*+mega?xM1WAFRb_+c=feV{#?dF9rus7TRbxf#AOCzkI18Vz4J@X zA-BTx-i3Pa?lKrUxAZv|e)zvd@oN->=jf>aiSjG{mJ5e{Dt?=SFrs4^ulnJ9uql3r zf-sENe@VFNu{hI=&*AZb>*zl!o6ax1?*`?T5AX}gO_IkPQD>cB^{Yx15S@HLDMg|b32{OZryjt{hNNtJDo{|77Q_JwaZ0_r7DSL;v^A(X@fW!8 zKlnNP4Q_}_m|4$@BC8-FMslA0Ja6a2c0NDOo^(4Mg7s*lT@92v)QvAacn}>@N`3y_ zqD>OF3q0T-<4u(TX*&?2>+?u8PK=jT2aI(;RTtw6)d8XFGJlcBvFc*{raIsyT}$4` zO{lNRyUI6<5B03_fw-l-)FgH-0HqR`Z6^TIq3hB5*K%14rRlY~${O`;$$|}il%9+2 z>i5y7D}kmPivJ1{e}}WcIU*iK1!Et5%4XF&*BlJ8>}Zfo%+QSg9$Dh_>Blnmy3Is~ zx~ra}u28ao*{ZILbGFR$$-TY%t}I9U z$tX{Xq$u5FKONb8T(aj{JEuplpFeo^^btz>FOL+@M4L#}_FekC0$NK={ zeXvVwasIo^Zt{R0?-z(X@y-z+e8O7!Ci3KIYsdOQynFD74?N%v#D!XoGKb!)YYX31|8kw=IpTtr_jI2Aj*=&ERE+h%0SJz09RL6T literal 0 HcmV?d00001 diff --git a/rbpf/tests/elfs/rodata_section.rs b/rbpf/tests/elfs/rodata_section.rs new file mode 100644 index 00000000000000..74add7db572b2f --- /dev/null +++ b/rbpf/tests/elfs/rodata_section.rs @@ -0,0 +1,8 @@ +static _VAL_A: u64 = 41; +static VAL_B: u64 = 42; +static _VAL_C: u64 = 43; + +#[no_mangle] +pub fn entrypoint() -> u64 { + return unsafe { core::ptr::read_volatile(&VAL_B) }; +} \ No newline at end of file diff --git a/rbpf/tests/elfs/rodata_section.so b/rbpf/tests/elfs/rodata_section.so new file mode 100755 index 0000000000000000000000000000000000000000..8868f7e63d7e85b0963ee207dc796f3c0b2fdd51 GIT binary patch literal 5424 zcmeHL&ubGw6rTLhrnaEAAUWwAgW!_IrVTx%wH1VI@gSmJ!e+DDpxKno3eBm1h5v^K z5B@3rH#`Ym;`?^rx5OnE@g&TH+3(Gp@4Ywkm|2*)J`JANYc)e@n2oXr>p%xCcM8P{ z3{2lN<-27z4LG+jbPu0G7!7FEMkEhoO=(vBFit+OB@IHX`rWACjc~vxzJ!wqk$!ct z-Imr-zmDkeBYqVfLM-zZX7_a!a20SBa20SBa20SBa20SBa20SBa20SB_^&JQO)kM! 
zR`630_u+-!kH6@u9xFan#GM^-zW~!#KYbqHb=&OfxP~*omdhDT$N|A!9Kw71R?#(W z#r#b{^p?iIqxNmZ2a10S$iJhrfNmKePO@}yIh!OIEjf$t0Kp{r7^jCe2u`fu^Sa(% zNn5KRcGLV=RtF!6p=RoNUT_%y4T$wd`5u8Td6pJhfu;yHoX1f%nI&G>?~SAP`}<_xNBw9tj>mo+`=@55p2L&3=SOERp5+Vr#}8_T#{GkE z=yT&)=qtkd(zGE8_;?S3SZU~6-KbT5wSQN*$lZ_z=}3b=-<9}&RPJApM0gIczp>Yu zpZlNhSzVAt;E%Y>&vRkLhTQb$`Dp2Zfgc5S@HLDMg|bsp<)QtPohSXj-eMw6s-`+=>H2z$G}gX(Nb@oJDF*{R>?9 zAN(Bt1~j?#?*{Wx zqfln6j`&sij}#v$%AUmTIiOe)v#mKmIdtC}|C-M$32Aw@RvBY|D>7i)aA40|+)Z8o zw#M%$t}Fg4Nd7ghBG5ttEKroDo_(surLhsrCJi9B5IyIdiQ4T8QmV^ZlSZVP|2zp(!p5g>{vuCZ#Z+mFZ-hS!X(%I5agAijOi$ zVOC8hyh#dq+N2}K^Qozw=aAfD!)cM3aaD$C+|RT3+uKjFv(I6VhHp=!t=}pAS9{5D zKiWBen(Y+vD9%QCkw--l9n!V)93H$m-8*^y414+?lyj9w?z$MhQ-dztym7t=EU_bc zy;jY47)X%K=lkQk%#qyqUHq-jFvnuz`k&>WCUj!VJ+|Z*x zfanjVSkrA`NSo{}^8-El7l=C1=g5zE#9CPsb+T*iSQkX!BaZyQU7kR0a0K6J@Ir&q m?@|NuWB>ac!`Um u8, + key: u64, +} + +fn f1(a: u8) -> u8 { + return a + 1; +} + +#[link_section = ".data.rel.ro"] +static E1: PubkeyLutEntry = PubkeyLutEntry { _fp: f1, key: 0x0102030405060708 }; + +#[no_mangle] +pub fn entrypoint() -> u64 { + return E1.key; +} diff --git a/rbpf/tests/elfs/struct_func_pointer.so b/rbpf/tests/elfs/struct_func_pointer.so new file mode 100755 index 0000000000000000000000000000000000000000..333e12409ac6248557d093b5a381a3c9732c2b79 GIT binary patch literal 5112 zcmeHLyGjE=6rF6MiI2huNGeHTW8s>BfuI(O4-lfgRn&FWAX#PIfWg8xKj5EO^4gsE9W@1aEE#-{({5 zbojovJpi!{WZ)e>sq52<3yQx5@Ajr0C8pVquETh}(roTlYIVQr zul-yF`3mOkBkQpWgXh`vz=yIV%D46XB)m_If8L8uJMF+KcfC%ENo@S!rqhu<8ego* z>6zfwT@t5Sibm$$AZcB6qo8F^62|`dMHtlT_1a2s+4ZYt1vgB|YdZ-_Hu z@V_FPJ@-go1vW~yk5ZHc#OBpu*oV=0_Fs5^7_ZB=or61seh!59iv6H!YPLNh?2Ga2 zBLPvH@w|5fEx>!HY4iqhW=us? u64 { + unsafe { syscalls::log(b"foo\n".as_ptr(), 4); } + return 0; +} diff --git a/rbpf/tests/elfs/syscall_reloc_64_32.so b/rbpf/tests/elfs/syscall_reloc_64_32.so new file mode 100755 index 0000000000000000000000000000000000000000..c765a7cc9a1ae3b01841e4ae5926621b8f14d983 GIT binary patch literal 1632 zcmbVMJx?1!5FKnMd_{r~2~kB?P|##CF^LeO074>=4n&b6ROWm>6ARlHoev3DNt2&| zM9EJ`$M2%0apv94bK(PvWTd?}-!r?jd*46$pGp%GLMqE-CV8gXHp5Ms9wOVaAr<}3 z%Cx}knJ)Slv1(yNna>r7tLQqmIz@yHL@dTlm2!I#FLSjJ83*_0y1rPwBL1>C;*`lt z>d)Swy46JJ>8I{vsB~+7SB9-Po&o#XWcL5kQTl^^7<4*yjfunh z`}KP5t*Zr%R@i7oZ^E++SCzuC)%Q-0b`B3da2x-PI4>;ReGunJXJ#$C5-J-YvV2m1(8LOg0Sp6|j_9cnY4 z?}NxX>XH%rr@W%fv@XV<8bkEVk^5zR=3P@eau(0|hvnhBw=|hAc=oZWJZsYjcx_7l q=kT7ePuA!DcTKM_M)Nk1&tuv@b*c81;q0gbHch$pw^DibP4#cb)?U^C literal 0 HcmV?d00001 diff --git a/rbpf/tests/elfs/syscall_static.rs b/rbpf/tests/elfs/syscall_static.rs new file mode 100644 index 00000000000000..f4fd29d8fc0585 --- /dev/null +++ b/rbpf/tests/elfs/syscall_static.rs @@ -0,0 +1,7 @@ +mod syscalls; + +#[no_mangle] +pub fn entrypoint() -> u64 { + unsafe { syscalls::log(b"foo\n".as_ptr(), 4); } + return 0; +} diff --git a/rbpf/tests/elfs/syscall_static.so b/rbpf/tests/elfs/syscall_static.so new file mode 100755 index 0000000000000000000000000000000000000000..0667af4f1144c07c987ca60f13687924b798b0ae GIT binary patch literal 5368 zcmeHL&1w`u5U%X5#u&jELH4A3Tm*+*VsOYQY7~S~a`56QP9_swvb!TQBV%E1@d@a>q z)hD*NSb4+|=gNbK*Sr|sp#q@-p#q@-p#q@-p#q@-p#q@-p#q@-p#uL!1-|GIJkXQ; zP{cfZs)I6S?oays`uhFoR1x&qgB%vcb+C6$=6rt3P+ulT;IOV4?fYF1_Gc)VBDX+6rK{f*&ao<52n zWbZyk136cTy`49&9>09?l)LJKQuQz`;Ph7Opm@qkK$el#J=Ud6!(vK*s+G#IImRxV*GE>30sK( literal 0 HcmV?d00001 diff --git a/rbpf/tests/elfs/syscalls.rs b/rbpf/tests/elfs/syscalls.rs new file mode 100644 index 00000000000000..544903e421bc65 --- /dev/null +++ b/rbpf/tests/elfs/syscalls.rs @@ -0,0 +1,72 @@ +pub const fn 
sys_hash(name: &str) -> usize { + murmur3_32(name.as_bytes(), 0) as usize +} + +const fn murmur3_32(buf: &[u8], seed: u32) -> u32 { + const fn pre_mix(buf: [u8; 4]) -> u32 { + u32::from_le_bytes(buf) + .wrapping_mul(0xcc9e2d51) + .rotate_left(15) + .wrapping_mul(0x1b873593) + } + + let mut hash = seed; + + let mut i = 0; + while i < buf.len() / 4 { + let buf = [buf[i * 4], buf[i * 4 + 1], buf[i * 4 + 2], buf[i * 4 + 3]]; + hash ^= pre_mix(buf); + hash = hash.rotate_left(13); + hash = hash.wrapping_mul(5).wrapping_add(0xe6546b64); + + i += 1; + } + + match buf.len() % 4 { + 0 => {} + 1 => { + hash = hash ^ pre_mix([buf[i * 4], 0, 0, 0]); + } + 2 => { + hash = hash ^ pre_mix([buf[i * 4], buf[i * 4 + 1], 0, 0]); + } + 3 => { + hash = hash ^ pre_mix([buf[i * 4], buf[i * 4 + 1], buf[i * 4 + 2], 0]); + } + _ => { /* unreachable!() */ } + } + + hash = hash ^ buf.len() as u32; + hash = hash ^ (hash.wrapping_shr(16)); + hash = hash.wrapping_mul(0x85ebca6b); + hash = hash ^ (hash.wrapping_shr(13)); + hash = hash.wrapping_mul(0xc2b2ae35); + hash = hash ^ (hash.wrapping_shr(16)); + + hash +} + +#[cfg(target_feature = "static-syscalls")] +macro_rules! define_syscall { + (fn $name:ident($($arg:ident: $typ:ty),*) -> $ret:ty) => { + #[allow(dead_code)] + #[inline] + pub unsafe fn $name($($arg: $typ),*) -> $ret { + // this enum is used to force the hash to be computed in a const context + #[repr(usize)] + enum Syscall { + Code = sys_hash(stringify!($name)), + } + + let syscall: extern "C" fn($($arg: $typ),*) -> $ret = core::mem::transmute(Syscall::Code); + syscall($($arg),*) + } + + }; + (fn $name:ident($($arg:ident: $typ:ty),*)) => { + define_syscall!(fn $name($($arg: $typ),*) -> ()); + } +} + +define_syscall!(fn log(message: *const u8, len: u64)); +define_syscall!(fn log_64(arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64)); diff --git a/rbpf/tests/execution.rs b/rbpf/tests/execution.rs new file mode 100644 index 00000000000000..f7ef5361a584bd --- /dev/null +++ b/rbpf/tests/execution.rs @@ -0,0 +1,3911 @@ +#![allow(clippy::arithmetic_side_effects)] +#![cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] +// Copyright 2020 Solana Maintainers +// +// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or +// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +extern crate byteorder; +extern crate libc; +extern crate solana_rbpf; +extern crate test_utils; +extern crate thiserror; + +use byteorder::{ByteOrder, LittleEndian}; +#[cfg(all(not(windows), target_arch = "x86_64"))] +use rand::{rngs::SmallRng, RngCore, SeedableRng}; +use solana_rbpf::{ + assembler::assemble, + declare_builtin_function, ebpf, + elf::Executable, + error::{EbpfError, ProgramResult}, + memory_region::{AccessType, MemoryMapping, MemoryRegion}, + program::{BuiltinFunction, BuiltinProgram, FunctionRegistry, SBPFVersion}, + static_analysis::Analysis, + syscalls, + verifier::RequisiteVerifier, + vm::{Config, ContextObject, TestContextObject}, +}; +use std::{fs::File, io::Read, sync::Arc}; +use test_utils::{ + assert_error, create_vm, PROG_TCP_PORT_80, TCP_SACK_ASM, TCP_SACK_MATCH, TCP_SACK_NOMATCH, +}; + +const INSTRUCTION_METER_BUDGET: u64 = 1024; + +macro_rules!
test_interpreter_and_jit { + (register, $function_registry:expr, $location:expr => $syscall_function:expr) => { + $function_registry + .register_function_hashed($location.as_bytes(), $syscall_function) + .unwrap(); + }; + ($executable:expr, $mem:tt, $context_object:expr, $expected_result:expr $(,)?) => { + let expected_instruction_count = $context_object.get_remaining(); + #[allow(unused_mut)] + let mut context_object = $context_object; + let expected_result = format!("{:?}", $expected_result); + if !expected_result.contains("ExceededMaxInstructions") { + context_object.remaining = INSTRUCTION_METER_BUDGET; + } + $executable.verify::().unwrap(); + let (instruction_count_interpreter, interpreter_final_pc, _tracer_interpreter) = { + let mut mem = $mem; + let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START); + let mut context_object = context_object.clone(); + create_vm!( + vm, + &$executable, + &mut context_object, + stack, + heap, + vec![mem_region], + None + ); + let (instruction_count_interpreter, result) = vm.execute_program(&$executable, true); + assert_eq!( + format!("{:?}", result), + expected_result, + "Unexpected result for Interpreter" + ); + ( + instruction_count_interpreter, + vm.registers[11], + vm.context_object_pointer.clone(), + ) + }; + #[cfg(all(not(windows), target_arch = "x86_64"))] + { + #[allow(unused_mut)] + let compilation_result = $executable.jit_compile(); + let mut mem = $mem; + let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START); + create_vm!( + vm, + &$executable, + &mut context_object, + stack, + heap, + vec![mem_region], + None + ); + match compilation_result { + Err(err) => assert_eq!( + format!("{:?}", err), + expected_result, + "Unexpected result for JIT compilation" + ), + Ok(()) => { + let (instruction_count_jit, result) = vm.execute_program(&$executable, false); + let tracer_jit = &vm.context_object_pointer; + if !TestContextObject::compare_trace_log(&_tracer_interpreter, tracer_jit) { + let analysis = Analysis::from_executable(&$executable).unwrap(); + let stdout = std::io::stdout(); + analysis + .disassemble_trace_log( + &mut stdout.lock(), + &_tracer_interpreter.trace_log, + ) + .unwrap(); + analysis + .disassemble_trace_log(&mut stdout.lock(), &tracer_jit.trace_log) + .unwrap(); + panic!(); + } + assert_eq!( + format!("{:?}", result), + expected_result, + "Unexpected result for JIT" + ); + assert_eq!( + instruction_count_interpreter, instruction_count_jit, + "Interpreter and JIT instruction meter diverged", + ); + assert_eq!( + interpreter_final_pc, vm.registers[11], + "Interpreter and JIT instruction final PC diverged", + ); + } + } + } + if $executable.get_config().enable_instruction_meter { + assert_eq!( + instruction_count_interpreter, expected_instruction_count, + "Instruction meter did not consume expected amount" + ); + } + }; +} + +macro_rules! test_interpreter_and_jit_asm { + ($source:tt, $config:expr, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr, $expected_result:expr $(,)?) 
=> { + #[allow(unused_mut)] + { + let mut config = $config; + config.enable_instruction_tracing = true; + let mut function_registry = FunctionRegistry::<BuiltinFunction<TestContextObject>>::default(); + $(test_interpreter_and_jit!(register, function_registry, $location => $syscall_function);)* + let loader = Arc::new(BuiltinProgram::new_loader(config, function_registry)); + let mut executable = assemble($source, loader).unwrap(); + test_interpreter_and_jit!(executable, $mem, $context_object, $expected_result); + } + }; + ($source:tt, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr, $expected_result:expr $(,)?) => { + #[allow(unused_mut)] + { + test_interpreter_and_jit_asm!($source, Config::default(), $mem, ($($location => $syscall_function),*), $context_object, $expected_result); + } + }; +} + +macro_rules! test_interpreter_and_jit_elf { + ($source:tt, $config:tt, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr, $expected_result:expr $(,)?) => { + let mut file = File::open($source).unwrap(); + let mut elf = Vec::new(); + file.read_to_end(&mut elf).unwrap(); + #[allow(unused_mut)] + { + let mut function_registry = FunctionRegistry::<BuiltinFunction<TestContextObject>>::default(); + $(test_interpreter_and_jit!(register, function_registry, $location => $syscall_function);)* + let loader = Arc::new(BuiltinProgram::new_loader($config, function_registry)); + let mut executable = Executable::<TestContextObject>::from_elf(&elf, loader).unwrap(); + test_interpreter_and_jit!(executable, $mem, $context_object, $expected_result); + } + }; + ($source:tt, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr, $expected_result:expr $(,)?) => { + let config = Config { + enable_instruction_tracing: true, + ..Config::default() + }; + test_interpreter_and_jit_elf!($source, config, $mem, ($($location => $syscall_function),*), $context_object, $expected_result); + }; +} + +// BPF_ALU : Arithmetic and Logic + +#[test] +fn test_mov() { + test_interpreter_and_jit_asm!( + " + mov32 r1, 1 + mov32 r0, r1 + exit", + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_mov32_imm_large() { + test_interpreter_and_jit_asm!( + " + mov32 r0, -1 + exit", + [], + (), + TestContextObject::new(2), + ProgramResult::Ok(0xffffffff), + ); +} + +#[test] +fn test_mov_large() { + test_interpreter_and_jit_asm!( + " + mov32 r1, -1 + mov32 r0, r1 + exit", + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(0xffffffff), + ); +} + +#[test] +fn test_bounce() { + test_interpreter_and_jit_asm!( + " + mov r0, 1 + mov r6, r0 + mov r7, r6 + mov r8, r7 + mov r9, r8 + mov r0, r9 + exit", + [], + (), + TestContextObject::new(7), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_add32() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 2 + add32 r0, 1 + add32 r0, r1 + exit", + [], + (), + TestContextObject::new(5), + ProgramResult::Ok(0x3), + ); +} + +#[test] +fn test_alu32_arithmetic() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 1 + mov32 r2, 2 + mov32 r3, 3 + mov32 r4, 4 + mov32 r5, 5 + mov32 r6, 6 + mov32 r7, 7 + mov32 r8, 8 + mov32 r9, 9 + sub32 r0, 13 + sub32 r0, r1 + add32 r0, 23 + add32 r0, r7 + lmul32 r0, 7 + lmul32 r0, r3 + udiv32 r0, 2 + udiv32 r0, r4 + exit", + [], + (), + TestContextObject::new(19), + ProgramResult::Ok(110), + ); +} + +#[test] +fn test_alu64_arithmetic() { + test_interpreter_and_jit_asm!( + " + mov r0, 0 + mov r1, 1 + mov r2, 2 + mov r3, 3 + mov r4, 4 + mov r5, 5 + mov r6, 6 + mov r7, 7 + mov r8, 8 + mov r9, 9 + sub
r0, 13 + sub r0, r1 + add r0, 23 + add r0, r7 + lmul r0, 7 + lmul r0, r3 + udiv r0, 2 + udiv r0, r4 + exit", + [], + (), + TestContextObject::new(19), + ProgramResult::Ok(110), + ); +} + +#[test] +fn test_lmul128() { + test_interpreter_and_jit_asm!( + " + mov r0, r1 + mov r2, 30 + mov r3, 0 + mov r4, 20 + mov r5, 0 + lmul64 r3, r4 + lmul64 r5, r2 + add64 r5, r3 + mov64 r0, r2 + rsh64 r0, 0x20 + mov64 r3, r4 + rsh64 r3, 0x20 + mov64 r6, r3 + lmul64 r6, r0 + add64 r5, r6 + lsh64 r4, 0x20 + rsh64 r4, 0x20 + mov64 r6, r4 + lmul64 r6, r0 + lsh64 r2, 0x20 + rsh64 r2, 0x20 + lmul64 r4, r2 + mov64 r0, r4 + rsh64 r0, 0x20 + add64 r0, r6 + mov64 r6, r0 + rsh64 r6, 0x20 + add64 r5, r6 + lmul64 r3, r2 + lsh64 r0, 0x20 + rsh64 r0, 0x20 + add64 r0, r3 + mov64 r2, r0 + rsh64 r2, 0x20 + add64 r5, r2 + stxdw [r1+0x8], r5 + lsh64 r0, 0x20 + lsh64 r4, 0x20 + rsh64 r4, 0x20 + or64 r0, r4 + stxdw [r1+0x0], r0 + exit", + [0; 16], + (), + TestContextObject::new(42), + ProgramResult::Ok(600), + ); +} + +#[test] +fn test_alu32_logic() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 1 + mov32 r2, 2 + mov32 r3, 3 + mov32 r4, 4 + mov32 r5, 5 + mov32 r6, 6 + mov32 r7, 7 + mov32 r8, 8 + or32 r0, r5 + or32 r0, 0xa0 + and32 r0, 0xa3 + mov32 r9, 0x91 + and32 r0, r9 + lsh32 r0, 22 + lsh32 r0, r8 + rsh32 r0, 19 + rsh32 r0, r7 + xor32 r0, 0x03 + xor32 r0, r2 + exit", + [], + (), + TestContextObject::new(21), + ProgramResult::Ok(0x11), + ); +} + +#[test] +fn test_alu64_logic() { + test_interpreter_and_jit_asm!( + " + mov r0, 0 + mov r1, 1 + mov r2, 2 + mov r3, 3 + mov r4, 4 + mov r5, 5 + mov r6, 6 + mov r7, 7 + mov r8, 8 + or r0, r5 + or r0, 0xa0 + and r0, 0xa3 + mov r9, 0x91 + and r0, r9 + lsh r0, 32 + lsh r0, 22 + lsh r0, r8 + rsh r0, 32 + rsh r0, 19 + rsh r0, r7 + xor r0, 0x03 + xor r0, r2 + exit", + [], + (), + TestContextObject::new(23), + ProgramResult::Ok(0x11), + ); +} + +#[test] +fn test_arsh32_high_shift() { + test_interpreter_and_jit_asm!( + " + mov r0, 8 + mov32 r1, 0x00000001 + hor64 r1, 0x00000001 + arsh32 r0, r1 + exit", + [], + (), + TestContextObject::new(5), + ProgramResult::Ok(0x4), + ); +} + +#[test] +fn test_arsh32_imm() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0xf8 + lsh32 r0, 28 + arsh32 r0, 16 + exit", + [], + (), + TestContextObject::new(4), + ProgramResult::Ok(0xffff8000), + ); +} + +#[test] +fn test_arsh32_reg() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0xf8 + mov32 r1, 16 + lsh32 r0, 28 + arsh32 r0, r1 + exit", + [], + (), + TestContextObject::new(5), + ProgramResult::Ok(0xffff8000), + ); +} + +#[test] +fn test_arsh64() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 1 + lsh r0, 63 + arsh r0, 55 + mov32 r1, 5 + arsh r0, r1 + exit", + [], + (), + TestContextObject::new(6), + ProgramResult::Ok(0xfffffffffffffff8), + ); +} + +#[test] +fn test_lsh64_reg() { + test_interpreter_and_jit_asm!( + " + mov r0, 0x1 + mov r7, 4 + lsh r0, r7 + exit", + [], + (), + TestContextObject::new(4), + ProgramResult::Ok(0x10), + ); +} + +#[test] +fn test_rhs32_imm() { + test_interpreter_and_jit_asm!( + " + xor r0, r0 + add r0, -1 + rsh32 r0, 8 + exit", + [], + (), + TestContextObject::new(4), + ProgramResult::Ok(0x00ffffff), + ); +} + +#[test] +fn test_rsh64_reg() { + test_interpreter_and_jit_asm!( + " + mov r0, 0x10 + mov r7, 4 + rsh r0, r7 + exit", + [], + (), + TestContextObject::new(4), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_be16() { + test_interpreter_and_jit_asm!( + " + ldxh r0, [r1] + be16 r0 + exit", + [0x11, 0x22], + (), + TestContextObject::new(3), + 
ProgramResult::Ok(0x1122), + ); +} + +#[test] +fn test_be16_high() { + test_interpreter_and_jit_asm!( + " + ldxdw r0, [r1] + be16 r0 + exit", + [0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x1122), + ); +} + +#[test] +fn test_be32() { + test_interpreter_and_jit_asm!( + " + ldxw r0, [r1] + be32 r0 + exit", + [0x11, 0x22, 0x33, 0x44], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x11223344), + ); +} + +#[test] +fn test_be32_high() { + test_interpreter_and_jit_asm!( + " + ldxdw r0, [r1] + be32 r0 + exit", + [0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x11223344), + ); +} + +#[test] +fn test_be64() { + test_interpreter_and_jit_asm!( + " + ldxdw r0, [r1] + be64 r0 + exit", + [0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x1122334455667788), + ); +} + +// BPF_PQR : Product / Quotient / Remainder + +#[test] +fn test_pqr() { + let mut prog = [0; 48]; + prog[0] = ebpf::MOV64_IMM; + prog[8] = ebpf::HOR64_IMM; + prog[16] = ebpf::MOV64_IMM; + prog[17] = 1; // dst = R1 + prog[24] = ebpf::HOR64_IMM; + prog[25] = 1; // dst = R1 + prog[33] = 16; // src = R1 + prog[40] = ebpf::EXIT; + let loader = Arc::new(BuiltinProgram::new_mock()); + for (opc, dst, src, expected_result) in [ + (ebpf::UHMUL64_IMM, 13u64, 4u64, 0u64), + (ebpf::UDIV32_IMM, 13u64, 4u64, 3u64), + (ebpf::UDIV64_IMM, 13u64, 4u64, 3u64), + (ebpf::UREM32_IMM, 13u64, 4u64, 1u64), + (ebpf::UREM64_IMM, 13u64, 4u64, 1u64), + (ebpf::UHMUL64_IMM, 13u64, u64::MAX, 12u64), + (ebpf::UDIV32_IMM, 13u64, u64::MAX, 0u64), + (ebpf::UDIV64_IMM, 13u64, u64::MAX, 0u64), + (ebpf::UREM32_IMM, 13u64, u64::MAX, 13u64), + (ebpf::UREM64_IMM, 13u64, u64::MAX, 13u64), + (ebpf::UHMUL64_IMM, u64::MAX, 4u64, 3u64), + (ebpf::UDIV32_IMM, u64::MAX, 4u64, (u32::MAX / 4) as u64), + (ebpf::UDIV64_IMM, u64::MAX, 4u64, u64::MAX / 4), + (ebpf::UREM32_IMM, u64::MAX, 4u64, 3u64), + (ebpf::UREM64_IMM, u64::MAX, 4u64, 3u64), + (ebpf::UHMUL64_IMM, u64::MAX, u64::MAX, u64::MAX - 1), + (ebpf::UDIV32_IMM, u64::MAX, u64::MAX, 1u64), + (ebpf::UDIV64_IMM, u64::MAX, u64::MAX, 1u64), + (ebpf::UREM32_IMM, u64::MAX, u64::MAX, 0u64), + (ebpf::UREM64_IMM, u64::MAX, u64::MAX, 0u64), + (ebpf::LMUL32_IMM, 13i64 as u64, 4i32 as u64, 52i32 as u64), + (ebpf::LMUL64_IMM, 13i64 as u64, 4i64 as u64, 52i64 as u64), + (ebpf::SHMUL64_IMM, 13i64 as u64, 4i64 as u64, 0i64 as u64), + (ebpf::SDIV32_IMM, 13i64 as u64, 4i32 as u64, 3i32 as u64), + (ebpf::SDIV64_IMM, 13i64 as u64, 4i64 as u64, 3i64 as u64), + (ebpf::SREM32_IMM, 13i64 as u64, 4i32 as u64, 1i64 as u64), + (ebpf::SREM64_IMM, 13i64 as u64, 4i64 as u64, 1i64 as u64), + (ebpf::LMUL32_IMM, 13i64 as u64, -4i32 as u64, -52i32 as u64), + (ebpf::LMUL64_IMM, 13i64 as u64, -4i64 as u64, -52i64 as u64), + (ebpf::SHMUL64_IMM, 13i64 as u64, -4i64 as u64, -1i64 as u64), + (ebpf::SDIV32_IMM, 13i64 as u64, -4i32 as u64, -3i32 as u64), + (ebpf::SDIV64_IMM, 13i64 as u64, -4i64 as u64, -3i64 as u64), + (ebpf::SREM32_IMM, 13i64 as u64, -4i32 as u64, 1i64 as u64), + (ebpf::SREM64_IMM, 13i64 as u64, -4i64 as u64, 1i64 as u64), + (ebpf::LMUL32_IMM, -13i64 as u64, 4i32 as u64, -52i32 as u64), + (ebpf::LMUL64_IMM, -13i64 as u64, 4i64 as u64, -52i64 as u64), + (ebpf::SHMUL64_IMM, -13i64 as u64, 4i64 as u64, -1i64 as u64), + (ebpf::SDIV32_IMM, -13i64 as u64, 4i32 as u64, -3i32 as u64), + (ebpf::SDIV64_IMM, -13i64 as u64, 4i64 as u64, -3i64 as u64), + (ebpf::SREM32_IMM, -13i64 as u64, 4i32 as 
u64, -1i64 as u64),
+        (ebpf::SREM64_IMM, -13i64 as u64, 4i64 as u64, -1i64 as u64),
+        (ebpf::LMUL32_IMM, -13i64 as u64, -4i32 as u64, 52i32 as u64),
+        (ebpf::LMUL64_IMM, -13i64 as u64, -4i64 as u64, 52i64 as u64),
+        (ebpf::SHMUL64_IMM, -13i64 as u64, -4i64 as u64, 0i64 as u64),
+        (ebpf::SDIV32_IMM, -13i64 as u64, -4i32 as u64, 3i32 as u64),
+        (ebpf::SDIV64_IMM, -13i64 as u64, -4i64 as u64, 3i64 as u64),
+        (ebpf::SREM32_IMM, -13i64 as u64, -4i32 as u64, -1i64 as u64),
+        (ebpf::SREM64_IMM, -13i64 as u64, -4i64 as u64, -1i64 as u64),
+    ] {
+        LittleEndian::write_u32(&mut prog[4..], dst as u32);
+        LittleEndian::write_u32(&mut prog[12..], (dst >> 32) as u32);
+        LittleEndian::write_u32(&mut prog[20..], src as u32);
+        LittleEndian::write_u32(&mut prog[28..], (src >> 32) as u32);
+        LittleEndian::write_u32(&mut prog[36..], src as u32);
+        prog[32] = opc;
+        #[allow(unused_mut)]
+        let mut executable = Executable::<TestContextObject>::from_text_bytes(
+            &prog,
+            loader.clone(),
+            SBPFVersion::V2,
+            FunctionRegistry::default(),
+        )
+        .unwrap();
+        test_interpreter_and_jit!(
+            executable,
+            [],
+            TestContextObject::new(6),
+            ProgramResult::Ok(expected_result),
+        );
+        prog[32] |= ebpf::BPF_X;
+        #[allow(unused_mut)]
+        let mut executable = Executable::<TestContextObject>::from_text_bytes(
+            &prog,
+            loader.clone(),
+            SBPFVersion::V2,
+            FunctionRegistry::default(),
+        )
+        .unwrap();
+        test_interpreter_and_jit!(
+            executable,
+            [],
+            TestContextObject::new(6),
+            ProgramResult::Ok(expected_result),
+        );
+    }
+}
+
+#[test]
+fn test_err_divide_by_zero() {
+    let mut prog = [0; 24];
+    prog[0] = ebpf::MOV32_IMM;
+    prog[16] = ebpf::EXIT;
+    let loader = Arc::new(BuiltinProgram::new_mock());
+    for opc in [
+        ebpf::UDIV32_REG,
+        ebpf::UDIV64_REG,
+        ebpf::UREM32_REG,
+        ebpf::UREM64_REG,
+        ebpf::SDIV32_REG,
+        ebpf::SDIV64_REG,
+        ebpf::SREM32_REG,
+        ebpf::SREM64_REG,
+    ] {
+        prog[8] = opc;
+        #[allow(unused_mut)]
+        let mut executable = Executable::<TestContextObject>::from_text_bytes(
+            &prog,
+            loader.clone(),
+            SBPFVersion::V2,
+            FunctionRegistry::default(),
+        )
+        .unwrap();
+        test_interpreter_and_jit!(
+            executable,
+            [],
+            TestContextObject::new(2),
+            ProgramResult::Err(EbpfError::DivideByZero),
+        );
+    }
+}
+
+#[test]
+fn test_err_divide_overflow() {
+    let mut prog = [0; 40];
+    prog[0] = ebpf::MOV64_IMM;
+    LittleEndian::write_i32(&mut prog[4..], 1);
+    prog[8] = ebpf::LSH64_IMM;
+    prog[16] = ebpf::MOV64_IMM;
+    prog[17] = 1; // dst = R1
+    LittleEndian::write_i32(&mut prog[20..], -1);
+    prog[25] = 16; // src = R1
+    LittleEndian::write_i32(&mut prog[28..], -1);
+    prog[32] = ebpf::EXIT;
+    let loader = Arc::new(BuiltinProgram::new_mock());
+    for opc in [
+        ebpf::SDIV32_IMM,
+        ebpf::SDIV64_IMM,
+        ebpf::SREM32_IMM,
+        ebpf::SREM64_IMM,
+        ebpf::SDIV32_REG,
+        ebpf::SDIV64_REG,
+        ebpf::SREM32_REG,
+        ebpf::SREM64_REG,
+    ] {
+        prog[12] = if opc & ebpf::BPF_B != 0 { 63 } else { 31 };
+        prog[24] = opc;
+        #[allow(unused_mut)]
+        let mut executable = Executable::<TestContextObject>::from_text_bytes(
+            &prog,
+            loader.clone(),
+            SBPFVersion::V2,
+            FunctionRegistry::default(),
+        )
+        .unwrap();
+        test_interpreter_and_jit!(
+            executable,
+            [],
+            TestContextObject::new(4),
+            ProgramResult::Err(EbpfError::DivideOverflow),
+        );
+    }
+}
+
+// BPF_LD : Loads
+
+#[test]
+fn test_hor64() {
+    test_interpreter_and_jit_asm!(
+        "
+        hor64 r0, 0x10203040
+        hor64 r0, 0x01020304
+        exit",
+        [],
+        (),
+        TestContextObject::new(3),
+        ProgramResult::Ok(0x1122334400000000),
+    );
+}
+
+#[test]
+fn test_ldxb() {
+    test_interpreter_and_jit_asm!(
+        "
+        ldxb r0, [r1+2]
+        exit",
+        [0xaa, 0xbb, 0x11, 0xcc, 0xdd],
+        (),
+        TestContextObject::new(2),
+ ProgramResult::Ok(0x11), + ); +} + +#[test] +fn test_ldxh() { + test_interpreter_and_jit_asm!( + " + ldxh r0, [r1+2] + exit", + [0xaa, 0xbb, 0x11, 0x22, 0xcc, 0xdd], + (), + TestContextObject::new(2), + ProgramResult::Ok(0x2211), + ); +} + +#[test] +fn test_ldxw() { + test_interpreter_and_jit_asm!( + " + ldxw r0, [r1+2] + exit", + [ + 0xaa, 0xbb, 0x11, 0x22, 0x33, 0x44, 0xcc, 0xdd, // + ], + (), + TestContextObject::new(2), + ProgramResult::Ok(0x44332211), + ); +} + +#[test] +fn test_ldxh_same_reg() { + test_interpreter_and_jit_asm!( + " + mov r0, r1 + sth [r0], 0x1234 + ldxh r0, [r0] + exit", + [0xff, 0xff], + (), + TestContextObject::new(4), + ProgramResult::Ok(0x1234), + ); +} + +#[test] +fn test_lldxdw() { + test_interpreter_and_jit_asm!( + " + ldxdw r0, [r1+2] + exit", + [ + 0xaa, 0xbb, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, // + 0x77, 0x88, 0xcc, 0xdd, // + ], + (), + TestContextObject::new(2), + ProgramResult::Ok(0x8877665544332211), + ); +} + +#[test] +fn test_err_ldxdw_oob() { + test_interpreter_and_jit_asm!( + " + ldxdw r0, [r1+6] + exit", + [ + 0xaa, 0xbb, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, // + 0x77, 0x88, 0xcc, 0xdd, // + ], + (), + TestContextObject::new(1), + ProgramResult::Err(EbpfError::AccessViolation( + AccessType::Load, + 0x400000006, + 8, + "input" + )), + ); +} + +#[test] +fn test_err_ldxdw_nomem() { + test_interpreter_and_jit_asm!( + " + ldxdw r0, [r1+6] + exit", + [], + (), + TestContextObject::new(1), + ProgramResult::Err(EbpfError::AccessViolation( + AccessType::Load, + 0x400000006, + 8, + "input" + )), + ); +} + +#[test] +fn test_ldxb_all() { + test_interpreter_and_jit_asm!( + " + mov r0, r1 + ldxb r9, [r0+0] + lsh r9, 0 + ldxb r8, [r0+1] + lsh r8, 4 + ldxb r7, [r0+2] + lsh r7, 8 + ldxb r6, [r0+3] + lsh r6, 12 + ldxb r5, [r0+4] + lsh r5, 16 + ldxb r4, [r0+5] + lsh r4, 20 + ldxb r3, [r0+6] + lsh r3, 24 + ldxb r2, [r0+7] + lsh r2, 28 + ldxb r1, [r0+8] + lsh r1, 32 + ldxb r0, [r0+9] + lsh r0, 36 + or r0, r1 + or r0, r2 + or r0, r3 + or r0, r4 + or r0, r5 + or r0, r6 + or r0, r7 + or r0, r8 + or r0, r9 + exit", + [ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, // + 0x08, 0x09, // + ], + (), + TestContextObject::new(31), + ProgramResult::Ok(0x9876543210), + ); +} + +#[test] +fn test_ldxh_all() { + test_interpreter_and_jit_asm!( + " + mov r0, r1 + ldxh r9, [r0+0] + be16 r9 + lsh r9, 0 + ldxh r8, [r0+2] + be16 r8 + lsh r8, 4 + ldxh r7, [r0+4] + be16 r7 + lsh r7, 8 + ldxh r6, [r0+6] + be16 r6 + lsh r6, 12 + ldxh r5, [r0+8] + be16 r5 + lsh r5, 16 + ldxh r4, [r0+10] + be16 r4 + lsh r4, 20 + ldxh r3, [r0+12] + be16 r3 + lsh r3, 24 + ldxh r2, [r0+14] + be16 r2 + lsh r2, 28 + ldxh r1, [r0+16] + be16 r1 + lsh r1, 32 + ldxh r0, [r0+18] + be16 r0 + lsh r0, 36 + or r0, r1 + or r0, r2 + or r0, r3 + or r0, r4 + or r0, r5 + or r0, r6 + or r0, r7 + or r0, r8 + or r0, r9 + exit", + [ + 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, // + 0x00, 0x04, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, // + 0x00, 0x08, 0x00, 0x09, // + ], + (), + TestContextObject::new(41), + ProgramResult::Ok(0x9876543210), + ); +} + +#[test] +fn test_ldxh_all2() { + test_interpreter_and_jit_asm!( + " + mov r0, r1 + ldxh r9, [r0+0] + be16 r9 + ldxh r8, [r0+2] + be16 r8 + ldxh r7, [r0+4] + be16 r7 + ldxh r6, [r0+6] + be16 r6 + ldxh r5, [r0+8] + be16 r5 + ldxh r4, [r0+10] + be16 r4 + ldxh r3, [r0+12] + be16 r3 + ldxh r2, [r0+14] + be16 r2 + ldxh r1, [r0+16] + be16 r1 + ldxh r0, [r0+18] + be16 r0 + or r0, r1 + or r0, r2 + or r0, r3 + or r0, r4 + or r0, r5 + or r0, r6 + or r0, r7 + or r0, r8 + or r0, r9 + exit", + [ + 
0x00, 0x01, 0x00, 0x02, 0x00, 0x04, 0x00, 0x08, // + 0x00, 0x10, 0x00, 0x20, 0x00, 0x40, 0x00, 0x80, // + 0x01, 0x00, 0x02, 0x00, // + ], + (), + TestContextObject::new(31), + ProgramResult::Ok(0x3ff), + ); +} + +#[test] +fn test_ldxw_all() { + test_interpreter_and_jit_asm!( + " + mov r0, r1 + ldxw r9, [r0+0] + be32 r9 + ldxw r8, [r0+4] + be32 r8 + ldxw r7, [r0+8] + be32 r7 + ldxw r6, [r0+12] + be32 r6 + ldxw r5, [r0+16] + be32 r5 + ldxw r4, [r0+20] + be32 r4 + ldxw r3, [r0+24] + be32 r3 + ldxw r2, [r0+28] + be32 r2 + ldxw r1, [r0+32] + be32 r1 + ldxw r0, [r0+36] + be32 r0 + or r0, r1 + or r0, r2 + or r0, r3 + or r0, r4 + or r0, r5 + or r0, r6 + or r0, r7 + or r0, r8 + or r0, r9 + exit", + [ + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, // + 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08, // + 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, // + 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08, 0x00, // + 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, // + ], + (), + TestContextObject::new(31), + ProgramResult::Ok(0x030f0f), + ); +} + +#[test] +fn test_stb() { + test_interpreter_and_jit_asm!( + " + stb [r1+2], 0x11 + ldxb r0, [r1+2] + exit", + [0xaa, 0xbb, 0xff, 0xcc, 0xdd], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x11), + ); +} + +#[test] +fn test_sth() { + test_interpreter_and_jit_asm!( + " + sth [r1+2], 0x2211 + ldxh r0, [r1+2] + exit", + [ + 0xaa, 0xbb, 0xff, 0xff, 0xcc, 0xdd, // + ], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x2211), + ); +} + +#[test] +fn test_stw() { + test_interpreter_and_jit_asm!( + " + stw [r1+2], 0x44332211 + ldxw r0, [r1+2] + exit", + [ + 0xaa, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xdd, // + ], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x44332211), + ); +} + +#[test] +fn test_stdw() { + test_interpreter_and_jit_asm!( + " + stdw [r1+2], 0x44332211 + ldxdw r0, [r1+2] + exit", + [ + 0xaa, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // + 0xff, 0xff, 0xcc, 0xdd, // + ], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x44332211), + ); +} + +#[test] +fn test_stxb() { + test_interpreter_and_jit_asm!( + " + mov32 r2, 0x11 + stxb [r1+2], r2 + ldxb r0, [r1+2] + exit", + [ + 0xaa, 0xbb, 0xff, 0xcc, 0xdd, // + ], + (), + TestContextObject::new(4), + ProgramResult::Ok(0x11), + ); +} + +#[test] +fn test_stxh() { + test_interpreter_and_jit_asm!( + " + mov32 r2, 0x2211 + stxh [r1+2], r2 + ldxh r0, [r1+2] + exit", + [ + 0xaa, 0xbb, 0xff, 0xff, 0xcc, 0xdd, // + ], + (), + TestContextObject::new(4), + ProgramResult::Ok(0x2211), + ); +} + +#[test] +fn test_stxw() { + test_interpreter_and_jit_asm!( + " + mov32 r2, 0x44332211 + stxw [r1+2], r2 + ldxw r0, [r1+2] + exit", + [ + 0xaa, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xdd, // + ], + (), + TestContextObject::new(4), + ProgramResult::Ok(0x44332211), + ); +} + +#[test] +fn test_stxdw() { + test_interpreter_and_jit_asm!( + " + mov r2, -2005440939 + lsh r2, 32 + or r2, 0x44332211 + stxdw [r1+2], r2 + ldxdw r0, [r1+2] + exit", + [ + 0xaa, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // + 0xff, 0xff, 0xcc, 0xdd, // + ], + (), + TestContextObject::new(6), + ProgramResult::Ok(0x8877665544332211), + ); +} + +#[test] +fn test_stxb_all() { + test_interpreter_and_jit_asm!( + " + mov r0, 0xf0 + mov r2, 0xf2 + mov r3, 0xf3 + mov r4, 0xf4 + mov r5, 0xf5 + mov r6, 0xf6 + mov r7, 0xf7 + mov r8, 0xf8 + stxb [r1], r0 + stxb [r1+1], r2 + stxb [r1+2], r3 + stxb [r1+3], r4 + stxb [r1+4], r5 + stxb [r1+5], r6 + stxb [r1+6], r7 + stxb [r1+7], r8 + ldxdw r0, [r1] + be64 r0 + exit", + [ + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, // + ], + (), + TestContextObject::new(19), + ProgramResult::Ok(0xf0f2f3f4f5f6f7f8), + ); +} + +#[test] +fn test_stxb_all2() { + test_interpreter_and_jit_asm!( + " + mov r0, r1 + mov r1, 0xf1 + mov r9, 0xf9 + stxb [r0], r1 + stxb [r0+1], r9 + ldxh r0, [r0] + be16 r0 + exit", + [0xff, 0xff], + (), + TestContextObject::new(8), + ProgramResult::Ok(0xf1f9), + ); +} + +#[test] +fn test_stxb_chain() { + test_interpreter_and_jit_asm!( + " + mov r0, r1 + ldxb r9, [r0+0] + stxb [r0+1], r9 + ldxb r8, [r0+1] + stxb [r0+2], r8 + ldxb r7, [r0+2] + stxb [r0+3], r7 + ldxb r6, [r0+3] + stxb [r0+4], r6 + ldxb r5, [r0+4] + stxb [r0+5], r5 + ldxb r4, [r0+5] + stxb [r0+6], r4 + ldxb r3, [r0+6] + stxb [r0+7], r3 + ldxb r2, [r0+7] + stxb [r0+8], r2 + ldxb r1, [r0+8] + stxb [r0+9], r1 + ldxb r0, [r0+9] + exit", + [ + 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x00, 0x00, // + ], + (), + TestContextObject::new(21), + ProgramResult::Ok(0x2a), + ); +} + +// BPF_JMP : Branches + +#[test] +fn test_exit_capped() { + test_interpreter_and_jit_asm!( + " + exit", + [], + (), + TestContextObject::new(0), + ProgramResult::Err(EbpfError::ExceededMaxInstructions), + ); +} + +#[test] +fn test_exit_without_value() { + test_interpreter_and_jit_asm!( + " + exit", + [], + (), + TestContextObject::new(1), + ProgramResult::Ok(0x0), + ); +} + +#[test] +fn test_exit() { + test_interpreter_and_jit_asm!( + " + mov r0, 0 + exit", + [], + (), + TestContextObject::new(2), + ProgramResult::Ok(0x0), + ); +} + +#[test] +fn test_early_exit() { + test_interpreter_and_jit_asm!( + " + mov r0, 3 + exit + mov r0, 4 + exit", + [], + (), + TestContextObject::new(2), + ProgramResult::Ok(0x3), + ); +} + +#[test] +fn test_ja() { + test_interpreter_and_jit_asm!( + " + mov r0, 1 + ja +1 + mov r0, 2 + exit", + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jeq_imm() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 0xa + jeq r1, 0xb, +4 + mov32 r0, 1 + mov32 r1, 0xb + jeq r1, 0xb, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(7), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jeq_reg() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 0xa + mov32 r2, 0xb + jeq r1, r2, +4 + mov32 r0, 1 + mov32 r1, 0xb + jeq r1, r2, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(8), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jge_imm() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 0xa + jge r1, 0xb, +4 + mov32 r0, 1 + mov32 r1, 0xc + jge r1, 0xb, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(7), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jge_reg() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 0xa + mov32 r2, 0xb + jge r1, r2, +4 + mov32 r0, 1 + mov32 r1, 0xb + jge r1, r2, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(8), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jle_imm() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 5 + jle r1, 4, +1 + jle r1, 6, +1 + exit + jle r1, 5, +1 + exit + mov32 r0, 1 + exit", + [], + (), + TestContextObject::new(7), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jle_reg() { + test_interpreter_and_jit_asm!( + " + mov r0, 0 + mov r1, 5 + mov r2, 4 + mov r3, 6 + jle r1, r2, +2 + jle r1, r1, +1 + exit + jle r1, r3, +1 + exit + mov r0, 1 + exit", + [], + (), + TestContextObject::new(9), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jgt_imm() { + test_interpreter_and_jit_asm!( 
+ " + mov32 r0, 0 + mov32 r1, 5 + jgt r1, 6, +2 + jgt r1, 5, +1 + jgt r1, 4, +1 + exit + mov32 r0, 1 + exit", + [], + (), + TestContextObject::new(7), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jgt_reg() { + test_interpreter_and_jit_asm!( + " + mov r0, 0 + mov r1, 5 + mov r2, 6 + mov r3, 4 + jgt r1, r2, +2 + jgt r1, r1, +1 + jgt r1, r3, +1 + exit + mov r0, 1 + exit", + [], + (), + TestContextObject::new(9), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jlt_imm() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 5 + jlt r1, 4, +2 + jlt r1, 5, +1 + jlt r1, 6, +1 + exit + mov32 r0, 1 + exit", + [], + (), + TestContextObject::new(7), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jlt_reg() { + test_interpreter_and_jit_asm!( + " + mov r0, 0 + mov r1, 5 + mov r2, 4 + mov r3, 6 + jlt r1, r2, +2 + jlt r1, r1, +1 + jlt r1, r3, +1 + exit + mov r0, 1 + exit", + [], + (), + TestContextObject::new(9), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jne_imm() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 0xb + jne r1, 0xb, +4 + mov32 r0, 1 + mov32 r1, 0xa + jne r1, 0xb, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(7), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jne_reg() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 0xb + mov32 r2, 0xb + jne r1, r2, +4 + mov32 r0, 1 + mov32 r1, 0xa + jne r1, r2, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(8), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jset_imm() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 0x7 + jset r1, 0x8, +4 + mov32 r0, 1 + mov32 r1, 0x9 + jset r1, 0x8, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(7), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jset_reg() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov32 r1, 0x7 + mov32 r2, 0x8 + jset r1, r2, +4 + mov32 r0, 1 + mov32 r1, 0x9 + jset r1, r2, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(8), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jsge_imm() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov r1, -2 + jsge r1, -1, +5 + jsge r1, 0, +4 + mov32 r0, 1 + mov r1, -1 + jsge r1, -1, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(8), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jsge_reg() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov r1, -2 + mov r2, -1 + mov32 r3, 0 + jsge r1, r2, +5 + jsge r1, r3, +4 + mov32 r0, 1 + mov r1, r2 + jsge r1, r2, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(10), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jsle_imm() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov r1, -2 + jsle r1, -3, +1 + jsle r1, -1, +1 + exit + mov32 r0, 1 + jsle r1, -2, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(7), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jsle_reg() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov r1, -1 + mov r2, -2 + mov32 r3, 0 + jsle r1, r2, +1 + jsle r1, r3, +1 + exit + mov32 r0, 1 + mov r1, r2 + jsle r1, r2, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(10), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jsgt_imm() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov r1, -2 + jsgt r1, -1, +4 + mov32 r0, 1 + mov32 r1, 0 + jsgt r1, -1, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(7), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jsgt_reg() { + test_interpreter_and_jit_asm!( + " + 
mov32 r0, 0 + mov r1, -2 + mov r2, -1 + jsgt r1, r2, +4 + mov32 r0, 1 + mov32 r1, 0 + jsgt r1, r2, +1 + mov32 r0, 2 + exit", + [], + (), + TestContextObject::new(8), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jslt_imm() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov r1, -2 + jslt r1, -3, +2 + jslt r1, -2, +1 + jslt r1, -1, +1 + exit + mov32 r0, 1 + exit", + [], + (), + TestContextObject::new(7), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_jslt_reg() { + test_interpreter_and_jit_asm!( + " + mov32 r0, 0 + mov r1, -2 + mov r2, -3 + mov r3, -1 + jslt r1, r1, +2 + jslt r1, r2, +1 + jslt r1, r3, +1 + exit + mov32 r0, 1 + exit", + [], + (), + TestContextObject::new(9), + ProgramResult::Ok(0x1), + ); +} + +// Call Stack + +#[test] +fn test_stack1() { + test_interpreter_and_jit_asm!( + " + mov r1, 51 + stdw [r10-16], 0xab + stdw [r10-8], 0xcd + and r1, 1 + lsh r1, 3 + mov r2, r10 + add r2, r1 + ldxdw r0, [r2-16] + exit", + [], + (), + TestContextObject::new(9), + ProgramResult::Ok(0xcd), + ); +} + +#[test] +fn test_stack2() { + test_interpreter_and_jit_asm!( + " + stb [r10-4], 0x01 + stb [r10-3], 0x02 + stb [r10-2], 0x03 + stb [r10-1], 0x04 + mov r1, r10 + mov r2, 0x4 + sub r1, r2 + syscall bpf_mem_frob + mov r1, 0 + ldxb r2, [r10-4] + ldxb r3, [r10-3] + ldxb r4, [r10-2] + ldxb r5, [r10-1] + syscall bpf_gather_bytes + xor r0, 0x2a2a2a2a + exit", + [], + ( + "bpf_mem_frob" => syscalls::SyscallMemFrob::vm, + "bpf_gather_bytes" => syscalls::SyscallGatherBytes::vm, + ), + TestContextObject::new(16), + ProgramResult::Ok(0x01020304), + ); +} + +#[test] +fn test_string_stack() { + test_interpreter_and_jit_asm!( + " + mov r1, 0x78636261 + stxw [r10-8], r1 + mov r6, 0x0 + stxb [r10-4], r6 + stxb [r10-12], r6 + mov r1, 0x79636261 + stxw [r10-16], r1 + mov r1, r10 + add r1, -8 + mov r2, r1 + syscall bpf_str_cmp + mov r1, r0 + mov r0, 0x1 + lsh r1, 0x20 + rsh r1, 0x20 + jne r1, 0x0, +11 + mov r1, r10 + add r1, -8 + mov r2, r10 + add r2, -16 + syscall bpf_str_cmp + mov r1, r0 + lsh r1, 0x20 + rsh r1, 0x20 + mov r0, 0x1 + jeq r1, r6, +1 + mov r0, 0x0 + exit", + [], + ( + "bpf_str_cmp" => syscalls::SyscallStrCmp::vm, + ), + TestContextObject::new(28), + ProgramResult::Ok(0x0), + ); +} + +#[test] +fn test_err_dynamic_stack_out_of_bound() { + let config = Config { + enable_sbpf_v2: true, + max_call_depth: 3, + ..Config::default() + }; + + // The stack goes from MM_STACK_START + config.stack_size() to MM_STACK_START + + // Check that accessing MM_STACK_START - 1 fails + test_interpreter_and_jit_asm!( + " + stb [r10-0x3001], 0 + exit", + config, + [], + (), + TestContextObject::new(1), + ProgramResult::Err(EbpfError::AccessViolation( + AccessType::Store, + ebpf::MM_STACK_START - 1, + 1, + "program" + )), + ); + + // Check that accessing MM_STACK_START + expected_stack_len fails + test_interpreter_and_jit_asm!( + " + stb [r10], 0 + exit", + config, + [], + (), + TestContextObject::new(1), + ProgramResult::Err(EbpfError::AccessViolation( + AccessType::Store, + ebpf::MM_STACK_START + config.stack_size() as u64, + 1, + "stack" + )), + ); +} + +#[test] +fn test_err_dynamic_stack_ptr_overflow() { + let config = Config::default(); + + // See the comment in CallFrames::resize_stack() for the reason why it's + // safe to let the stack pointer overflow + + // stack_ptr -= stack_ptr + 1 + test_interpreter_and_jit_asm!( + " + add r11, -0x7FFFFFFF + add r11, -0x7FFFFFFF + add r11, -0x7FFFFFFF + add r11, -0x7FFFFFFF + add r11, -0x14005 + call function_foo + exit + function_foo: + stb [r10], 0 + 
exit", + config, + [], + (), + TestContextObject::new(7), + ProgramResult::Err(EbpfError::AccessViolation( + AccessType::Store, + u64::MAX, + 1, + "unknown" + )), + ); +} + +#[test] +fn test_dynamic_stack_frames_empty() { + let config = Config::default(); + + // Check that unless explicitly resized the stack doesn't grow + test_interpreter_and_jit_asm!( + " + call function_foo + exit + function_foo: + mov r0, r10 + exit", + config, + [], + (), + TestContextObject::new(4), + ProgramResult::Ok(ebpf::MM_STACK_START + config.stack_size() as u64), + ); +} + +#[test] +fn test_dynamic_frame_ptr() { + let config = Config::default(); + + // Check that upon entering a function (foo) the frame pointer is advanced + // to the top of the stack + test_interpreter_and_jit_asm!( + " + add r11, -8 + call function_foo + exit + function_foo: + mov r0, r10 + exit", + config, + [], + (), + TestContextObject::new(5), + ProgramResult::Ok(ebpf::MM_STACK_START + config.stack_size() as u64 - 8), + ); + + // And check that when exiting a function (foo) the caller's frame pointer + // is restored + test_interpreter_and_jit_asm!( + " + add r11, -8 + call function_foo + mov r0, r10 + exit + function_foo: + exit + ", + config, + [], + (), + TestContextObject::new(5), + ProgramResult::Ok(ebpf::MM_STACK_START + config.stack_size() as u64), + ); +} + +#[test] +fn test_entrypoint_exit() { + // With fixed frames we used to exit the entrypoint when we reached an exit + // instruction and the stack size was 1 * config.stack_frame_size, which + // meant that we were in the entrypoint's frame. With dynamic frames we + // can't infer anything from the stack size so we track call depth + // explicitly. Make sure exit still works with both fixed and dynamic + // frames. + for enable_sbpf_v2 in [false, true] { + let config = Config { + enable_sbpf_v2, + ..Config::default() + }; + + // This checks that when foo exits we don't stop execution even if the + // stack is empty (stack size and call depth are decoupled) + test_interpreter_and_jit_asm!( + " + entrypoint: + call function_foo + mov r0, 42 + exit + function_foo: + mov r0, 12 + exit", + config, + [], + (), + TestContextObject::new(5), + ProgramResult::Ok(42), + ); + } +} + +#[test] +fn test_stack_call_depth_tracking() { + for enable_sbpf_v2 in [false, true] { + let config = Config { + enable_sbpf_v2, + max_call_depth: 2, + ..Config::default() + }; + + // Given max_call_depth=2, make sure that two sibling calls don't + // trigger CallDepthExceeded. In other words ensure that we correctly + // pop frames in the interpreter and decrement + // EnvironmentStackSlotDepth on ebpf::EXIT in the jit. 
+        test_interpreter_and_jit_asm!(
+            "
+            call function_foo
+            call function_foo
+            exit
+            function_foo:
+            exit
+            ",
+            config,
+            [],
+            (),
+            TestContextObject::new(5),
+            ProgramResult::Ok(0),
+        );
+
+        // two nested calls should trigger CallDepthExceeded instead
+        test_interpreter_and_jit_asm!(
+            "
+            entrypoint:
+            call function_foo
+            exit
+            function_foo:
+            call function_bar
+            exit
+            function_bar:
+            exit
+            ",
+            config,
+            [],
+            (),
+            TestContextObject::new(2),
+            ProgramResult::Err(EbpfError::CallDepthExceeded),
+        );
+    }
+}
+
+#[test]
+fn test_err_mem_access_out_of_bound() {
+    let mem = [0; 512];
+    let mut prog = [0; 32];
+    prog[0] = ebpf::MOV32_IMM;
+    prog[8] = ebpf::HOR64_IMM;
+    prog[16] = ebpf::ST_B_IMM;
+    prog[24] = ebpf::EXIT;
+    let loader = Arc::new(BuiltinProgram::new_mock());
+    for address in [0x2u64, 0x8002u64, 0x80000002u64, 0x8000000000000002u64] {
+        LittleEndian::write_u32(&mut prog[4..], address as u32);
+        LittleEndian::write_u32(&mut prog[12..], (address >> 32) as u32);
+        #[allow(unused_mut)]
+        let mut executable = Executable::<TestContextObject>::from_text_bytes(
+            &prog,
+            loader.clone(),
+            SBPFVersion::V2,
+            FunctionRegistry::default(),
+        )
+        .unwrap();
+        test_interpreter_and_jit!(
+            executable,
+            mem,
+            TestContextObject::new(3),
+            ProgramResult::Err(EbpfError::AccessViolation(
+                AccessType::Store,
+                address,
+                1,
+                "unknown"
+            )),
+        );
+    }
+}
+
+// CALL_IMM & CALL_REG : Procedure Calls
+
+#[test]
+fn test_relative_call() {
+    test_interpreter_and_jit_elf!(
+        "tests/elfs/relative_call.so",
+        [1],
+        (),
+        TestContextObject::new(18),
+        ProgramResult::Ok(3),
+    );
+}
+
+#[test]
+fn test_bpf_to_bpf_scratch_registers() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r6, 0x11
+        mov64 r7, 0x22
+        mov64 r8, 0x44
+        mov64 r9, 0x88
+        call function_foo
+        mov64 r0, r6
+        add64 r0, r7
+        add64 r0, r8
+        add64 r0, r9
+        exit
+        function_foo:
+        mov64 r6, 0x00
+        mov64 r7, 0x00
+        mov64 r8, 0x00
+        mov64 r9, 0x00
+        exit",
+        [],
+        (),
+        TestContextObject::new(15),
+        ProgramResult::Ok(0xFF),
+    );
+}
+
+#[test]
+fn test_syscall_parameter_on_stack() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r1, r10
+        add64 r1, -0x100
+        mov64 r2, 0x1
+        syscall bpf_syscall_string
+        mov64 r0, 0x0
+        exit",
+        [],
+        (
+            "bpf_syscall_string" => syscalls::SyscallString::vm,
+        ),
+        TestContextObject::new(6),
+        ProgramResult::Ok(0),
+    );
+}
+
+#[test]
+fn test_callx() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r0, 0x0
+        mov64 r8, 0x1
+        lsh64 r8, 0x20
+        or64 r8, 0x30
+        callx r8
+        exit
+        function_foo:
+        mov64 r0, 0x2A
+        exit",
+        [],
+        (),
+        TestContextObject::new(8),
+        ProgramResult::Ok(42),
+    );
+}
+
+#[test]
+fn test_err_callx_unregistered() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r0, 0x0
+        mov64 r8, 0x1
+        lsh64 r8, 0x20
+        or64 r8, 0x30
+        callx r8
+        exit
+        mov64 r0, 0x2A
+        exit",
+        [],
+        (),
+        TestContextObject::new(6),
+        ProgramResult::Err(EbpfError::UnsupportedInstruction),
+    );
+}
+
+#[test]
+fn test_err_callx_oob_low() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r0, 0x3
+        callx r0
+        exit",
+        [],
+        (),
+        TestContextObject::new(2),
+        ProgramResult::Err(EbpfError::CallOutsideTextSegment),
+    );
+}
+
+#[test]
+fn test_err_callx_oob_high() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r0, -0x1
+        lsh64 r0, 0x20
+        or64 r0, 0x3
+        callx r0
+        exit",
+        [],
+        (),
+        TestContextObject::new(4),
+        ProgramResult::Err(EbpfError::CallOutsideTextSegment),
+    );
+}
+
+#[test]
+fn test_bpf_to_bpf_depth() {
+    test_interpreter_and_jit_asm!(
+        "
+        ldxb r1, [r1]
+        add64 r1, -2
+        call function_foo
+        exit
+        function_foo:
+        jeq r1, 0, +2
+        add64 r1, -1
+        call function_foo
+        exit",
+        [Config::default().max_call_depth as u8],
+        (),
+        TestContextObject::new(78),
+        ProgramResult::Ok(0),
+    );
+    // The instruction count is lower here because all the `exit`s never run
+    test_interpreter_and_jit_asm!(
+        "
+        ldxb r1, [r1]
+        add64 r1, -2
+        call function_foo
+        exit
+        function_foo:
+        jeq r1, 0, +2
+        add64 r1, -1
+        call function_foo
+        exit",
+        [Config::default().max_call_depth as u8 + 1],
+        (),
+        TestContextObject::new(60),
+        ProgramResult::Err(EbpfError::CallDepthExceeded),
+    );
+}
+
+#[test]
+fn test_err_reg_stack_depth() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r0, 0x1
+        lsh64 r0, 0x20
+        callx r0
+        exit",
+        [],
+        (),
+        TestContextObject::new(60),
+        ProgramResult::Err(EbpfError::CallDepthExceeded),
+    );
+}
+
+// CALL_IMM : Syscalls
+
+/* TODO: syscalls::trash_registers needs asm!().
+// https://github.com/rust-lang/rust/issues/72016
+#[test]
+fn test_call_save() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r6, 0x1
+        mov64 r7, 0x20
+        mov64 r8, 0x300
+        mov64 r9, 0x4000
+        call 0
+        mov64 r0, 0x0
+        or64 r0, r6
+        or64 r0, r7
+        or64 r0, r8
+        or64 r0, r9
+        exit",
+        [],
+        (
+            0 => syscalls::trash_registers,
+        ),
+        { |_vm, res: ProgramResult| { res.unwrap() == 0 } }
+    );
+}*/
+
+#[test]
+fn test_err_syscall_string() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r1, 0x0
+        syscall bpf_syscall_string
+        mov64 r0, 0x0
+        exit",
+        [72, 101, 108, 108, 111],
+        (
+            "bpf_syscall_string" => syscalls::SyscallString::vm,
+        ),
+        TestContextObject::new(2),
+        ProgramResult::Err(EbpfError::SyscallError(Box::new(EbpfError::AccessViolation(AccessType::Load, 0, 0, "unknown")))),
+    );
+}
+
+#[test]
+fn test_syscall_string() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r2, 0x5
+        syscall bpf_syscall_string
+        mov64 r0, 0x0
+        exit",
+        [72, 101, 108, 108, 111],
+        (
+            "bpf_syscall_string" => syscalls::SyscallString::vm,
+        ),
+        TestContextObject::new(4),
+        ProgramResult::Ok(0),
+    );
+}
+
+#[test]
+fn test_syscall() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r1, 0xAA
+        mov64 r2, 0xBB
+        mov64 r3, 0xCC
+        mov64 r4, 0xDD
+        mov64 r5, 0xEE
+        syscall bpf_syscall_u64
+        mov64 r0, 0x0
+        exit",
+        [],
+        (
+            "bpf_syscall_u64" => syscalls::SyscallU64::vm,
+        ),
+        TestContextObject::new(8),
+        ProgramResult::Ok(0),
+    );
+}
+
+#[test]
+fn test_call_gather_bytes() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov r1, 1
+        mov r2, 2
+        mov r3, 3
+        mov r4, 4
+        mov r5, 5
+        syscall bpf_gather_bytes
+        exit",
+        [],
+        (
+            "bpf_gather_bytes" => syscalls::SyscallGatherBytes::vm,
+        ),
+        TestContextObject::new(7),
+        ProgramResult::Ok(0x0102030405),
+    );
+}
+
+#[test]
+fn test_call_memfrob() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov r6, r1
+        add r1, 2
+        mov r2, 4
+        syscall bpf_mem_frob
+        ldxdw r0, [r6]
+        be64 r0
+        exit",
+        [
+            0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, //
+        ],
+        (
+            "bpf_mem_frob" => syscalls::SyscallMemFrob::vm,
+        ),
+        TestContextObject::new(7),
+        ProgramResult::Ok(0x102292e2f2c0708),
+    );
+}
+
+declare_builtin_function!(
+    /// For test_nested_vm_syscall()
+    SyscallNestedVm,
+    fn rust(
+        _context_object: &mut TestContextObject,
+        depth: u64,
+        throw: u64,
+        _arg3: u64,
+        _arg4: u64,
+        _arg5: u64,
+        _memory_mapping: &mut MemoryMapping,
+    ) -> Result<u64, Box<dyn std::error::Error>> {
+        let (result, expected_result): (Result<u64, Box<dyn std::error::Error>>, ProgramResult) =
+            if throw == 0 {
+                (Result::Ok(42), ProgramResult::Ok(42))
+            } else {
+                (
+                    Result::Err(Box::new(EbpfError::CallDepthExceeded)),
+                    ProgramResult::Err(EbpfError::SyscallError(Box::new(
+                        EbpfError::CallDepthExceeded,
+                    ))),
+                )
+            };
+        #[allow(unused_mut)]
+        if depth > 0 {
+            let mut function_registry =
+                FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
+            function_registry
+                .register_function_hashed(*b"nested_vm_syscall", SyscallNestedVm::vm)
+                .unwrap();
+            let loader = BuiltinProgram::new_loader(Config::default(), function_registry);
+            let mem = [depth as u8 - 1, throw as u8];
+            let mut executable = assemble::<TestContextObject>(
+                "
+                ldxb r2, [r1+1]
+                ldxb r1, [r1]
+                syscall nested_vm_syscall
+                exit",
+                Arc::new(loader),
+            )
+            .unwrap();
+            test_interpreter_and_jit!(
+                executable,
+                mem,
+                TestContextObject::new(if throw == 0 { 4 } else { 3 }),
+                expected_result,
+            );
+        }
+        result
+    }
+);
+
+#[test]
+fn test_nested_vm_syscall() {
+    let config = Config::default();
+    let mut context_object = TestContextObject::default();
+    let mut memory_mapping = MemoryMapping::new(vec![], &config, &SBPFVersion::V2).unwrap();
+    let result = SyscallNestedVm::rust(&mut context_object, 1, 0, 0, 0, 0, &mut memory_mapping);
+    assert!(result.unwrap() == 42);
+    let result = SyscallNestedVm::rust(&mut context_object, 1, 1, 0, 0, 0, &mut memory_mapping);
+    assert_error!(result, "CallDepthExceeded");
+}
+
+// Instruction Meter Limit
+
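+// A note on the arithmetic behind these caps (a sketch of the intended
+// accounting, not a spec): TestContextObject::new(n) grants a budget of n
+// instructions, and each executed instruction consumes one unit. In the first
+// test below, `jsge r0, r0, -1` compares r0 with itself and always branches
+// back onto itself, so a budget of 4 is spent on four executions of the jump
+// and the VM stops with ExceededMaxInstructions before ever reaching `exit`.
+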
+#[test]
+fn test_tight_infinite_loop_conditional() {
+    test_interpreter_and_jit_asm!(
+        "
+        jsge r0, r0, -1
+        exit",
+        [],
+        (),
+        TestContextObject::new(4),
+        ProgramResult::Err(EbpfError::ExceededMaxInstructions),
+    );
+}
+
+#[test]
+fn test_tight_infinite_loop_unconditional() {
+    test_interpreter_and_jit_asm!(
+        "
+        ja -1
+        exit",
+        [],
+        (),
+        TestContextObject::new(4),
+        ProgramResult::Err(EbpfError::ExceededMaxInstructions),
+    );
+}
+
+#[test]
+fn test_tight_infinite_recursion() {
+    test_interpreter_and_jit_asm!(
+        "
+        entrypoint:
+        mov64 r3, 0x41414141
+        call entrypoint
+        exit",
+        [],
+        (),
+        TestContextObject::new(4),
+        ProgramResult::Err(EbpfError::ExceededMaxInstructions),
+    );
+}
+
+#[test]
+fn test_tight_infinite_recursion_callx() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r8, 0x1
+        lsh64 r8, 0x20
+        or64 r8, 0x28
+        call function_foo
+        exit
+        function_foo:
+        mov64 r3, 0x41414141
+        callx r8
+        exit",
+        [],
+        (),
+        TestContextObject::new(8),
+        ProgramResult::Err(EbpfError::ExceededMaxInstructions),
+    );
+}
+
+#[test]
+fn test_instruction_count_syscall() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r2, 0x5
+        syscall bpf_syscall_string
+        mov64 r0, 0x0
+        exit",
+        [72, 101, 108, 108, 111],
+        (
+            "bpf_syscall_string" => syscalls::SyscallString::vm,
+        ),
+        TestContextObject::new(4),
+        ProgramResult::Ok(0),
+    );
+}
+
+#[test]
+fn test_err_instruction_count_syscall_capped() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r2, 0x5
+        syscall bpf_syscall_string
+        mov64 r0, 0x0
+        exit",
+        [72, 101, 108, 108, 111],
+        (
+            "bpf_syscall_string" => syscalls::SyscallString::vm,
+        ),
+        TestContextObject::new(3),
+        ProgramResult::Err(EbpfError::ExceededMaxInstructions),
+    );
+}
+
+#[test]
+fn test_non_terminate_early() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r6, 0x0
+        mov64 r1, 0x0
+        mov64 r2, 0x0
+        mov64 r3, 0x0
+        mov64 r4, 0x0
+        mov64 r5, r6
+        syscall Unresolved
+        add64 r6, 0x1
+        ja -0x8
+        exit",
+        [],
+        (),
+        TestContextObject::new(7),
+        ProgramResult::Err(EbpfError::UnsupportedInstruction),
+    );
+}
+
+#[test]
+fn test_err_non_terminate_capped() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r6, 0x0
+        mov64 r1, 0x0
+        mov64 r2, 0x0
+        mov64 r3, 0x0
+        mov64 r4, 0x0
+        mov64 r5, r6
+        syscall bpf_trace_printf
+        add64 r6, 0x1
+        ja -0x8
+        exit",
+        [],
+        (
+            "bpf_trace_printf" => syscalls::SyscallTracePrintf::vm,
+        ),
+        TestContextObject::new(7),
+        ProgramResult::Err(EbpfError::ExceededMaxInstructions),
+    );
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r6, 0x0
+        mov64 r1, 0x0
+        mov64 r2, 0x0
+        mov64 r3, 0x0
+        mov64 r4, 0x0
+        mov64 r5, r6
+        syscall bpf_trace_printf
+        add64 r6, 0x1
+        ja -0x8
+        exit",
+        [],
+        (
+            "bpf_trace_printf" => syscalls::SyscallTracePrintf::vm,
+        ),
+        TestContextObject::new(1000),
+        ProgramResult::Err(EbpfError::ExceededMaxInstructions),
+    );
+}
+
+#[test]
+fn test_err_capped_before_exception() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r1, 0x0
+        mov64 r2, 0x0
+        add64 r0, 0x0
+        add64 r0, 0x0
+        udiv64 r1, r2
+        add64 r0, 0x0
+        exit",
+        [],
+        (),
+        TestContextObject::new(4),
+        ProgramResult::Err(EbpfError::ExceededMaxInstructions),
+    );
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r1, 0x0
+        mov64 r2, 0x0
+        add64 r0, 0x0
+        add64 r0, 0x0
+        syscall Unresolved
+        add64 r0, 0x0
+        exit",
+        [],
+        (),
+        TestContextObject::new(4),
+        ProgramResult::Err(EbpfError::ExceededMaxInstructions),
+    );
+}
+
+#[test]
+fn test_err_exit_capped() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r1, 0x1
+        lsh64 r1, 0x20
+        or64 r1, 0x28
+        callx r1
+        exit
+        function_foo:
+        exit
+        ",
+        [],
+        (),
+        TestContextObject::new(5),
+        ProgramResult::Err(EbpfError::ExceededMaxInstructions),
+    );
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r1, 0x1
+        lsh64 r1, 0x20
+        or64 r1, 0x28
+        callx r1
+        exit
+        function_foo:
+        mov r0, r0
+        exit
+        ",
+        [],
+        (),
+        TestContextObject::new(6),
+        ProgramResult::Err(EbpfError::ExceededMaxInstructions),
+    );
+    test_interpreter_and_jit_asm!(
+        "
+        call 1
+        exit
+        mov r0, r0
+        exit
+        ",
+        [],
+        (),
+        TestContextObject::new(3),
+        ProgramResult::Err(EbpfError::ExceededMaxInstructions),
+    );
+}
+
+// Symbols and Relocation
+
+#[test]
+fn test_symbol_relocation() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r1, r10
+        add64 r1, -0x1
+        mov64 r2, 0x1
+        syscall bpf_syscall_string
+        mov64 r0, 0x0
+        exit",
+        [72, 101, 108, 108, 111],
+        (
+            "bpf_syscall_string" => syscalls::SyscallString::vm,
+        ),
+        TestContextObject::new(6),
+        ProgramResult::Ok(0),
+    );
+}
+
+#[test]
+fn test_err_call_unresolved() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov r1, 1
+        mov r2, 2
+        mov r3, 3
+        mov r4, 4
+        mov r5, 5
+        syscall Unresolved
+        mov64 r0, 0x0
+        exit",
+        [],
+        (),
+        TestContextObject::new(6),
+        ProgramResult::Err(EbpfError::UnsupportedInstruction),
+    );
+}
+
+#[test]
+fn test_syscall_reloc_64_32() {
+    test_interpreter_and_jit_elf!(
+        "tests/elfs/syscall_reloc_64_32.so",
+        [],
+        (
+            "log" => syscalls::SyscallString::vm,
+        ),
+        TestContextObject::new(5),
+        ProgramResult::Ok(0),
+    );
+}
+
+#[test]
+fn test_syscall_static() {
+    test_interpreter_and_jit_elf!(
+        "tests/elfs/syscall_static.so",
+        [],
+        (
+            "log" => syscalls::SyscallString::vm,
+        ),
+        TestContextObject::new(6),
+        ProgramResult::Ok(0),
+    );
+}
+
+#[test]
+fn test_err_unresolved_syscall_reloc_64_32() {
+    let loader = BuiltinProgram::new_loader(
+        Config {
+            reject_broken_elfs: true,
+            ..Config::default()
+        },
+        FunctionRegistry::default(),
+    );
+    let mut file = File::open("tests/elfs/syscall_reloc_64_32.so").unwrap();
+    let mut elf = Vec::new();
+    file.read_to_end(&mut elf).unwrap();
+    assert_error!(
+        Executable::<TestContextObject>::from_elf(&elf, Arc::new(loader)),
+        "UnresolvedSymbol(\"log\", 39, 312)"
+    );
+}
+
+#[test]
+fn test_err_unresolved_syscall_static() {
+    test_interpreter_and_jit_elf!(
+        "tests/elfs/syscall_static.so",
+        [],
+        (),
+        TestContextObject::new(4),
+        ProgramResult::Err(EbpfError::UnsupportedInstruction),
+    );
+}
+
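+// The relocation tests below all follow one pattern: the ELF is loaded, its
+// relocations are resolved against ebpf::MM_PROGRAM_START, and the guest
+// program returns the relocated address so the host can assert on it. As a
+// sketch of the mechanics (assuming the standard eBPF encoding): an
+// R_BPF_64_64 target is an `lddw`, whose 64-bit value is split across the
+// 32-bit imm fields of its two 8-byte instruction slots, so relocation
+// rewrites those two halves in place.
+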
+#[test]
+fn test_reloc_64_64_sbpfv1() {
+    // Tests the correctness of R_BPF_64_64 relocations. The program returns the
+    // address of the entrypoint.
+    // [ 1] .text PROGBITS 0000000000000120 000120 000018 00 AX 0 0 8
+    test_interpreter_and_jit_elf!(
+        "tests/elfs/reloc_64_64_sbpfv1.so",
+        [],
+        (),
+        TestContextObject::new(2),
+        ProgramResult::Ok(ebpf::MM_PROGRAM_START + 0x120),
+    );
+}
+
+#[test]
+fn test_reloc_64_64() {
+    // Same as test_reloc_64_64_sbpfv1, but with .text already aligned to
+    // MM_PROGRAM_START by the linker
+    // [ 1] .text PROGBITS 0000000100000000 001000 000018 00 AX 0 0 8
+    test_interpreter_and_jit_elf!(
+        "tests/elfs/reloc_64_64.so",
+        [],
+        (),
+        TestContextObject::new(3),
+        ProgramResult::Ok(ebpf::MM_PROGRAM_START),
+    );
+}
+
+#[test]
+fn test_reloc_64_relative_sbpfv1() {
+    // Tests the correctness of R_BPF_64_RELATIVE relocations. The program
+    // returns the address of the first .rodata byte.
+    // [ 1] .text PROGBITS 0000000000000120 000120 000018 00 AX 0 0 8
+    // [ 2] .rodata PROGBITS 0000000000000138 000138 00000a 01 AMS 0 0 1
+    test_interpreter_and_jit_elf!(
+        "tests/elfs/reloc_64_relative_sbpfv1.so",
+        [],
+        (),
+        TestContextObject::new(2),
+        ProgramResult::Ok(ebpf::MM_PROGRAM_START + 0x138),
+    );
+}
+
+#[test]
+fn test_reloc_64_relative() {
+    // Same as test_reloc_64_relative_sbpfv1, but with .text placed already within
+    // MM_PROGRAM_START by the linker
+    // [ 1] .text PROGBITS 0000000100000000 001000 000018 00 AX 0 0 8
+    // [ 2] .rodata PROGBITS 0000000100000018 001018 00000b 01 AMS 0 0 1
+    test_interpreter_and_jit_elf!(
+        "tests/elfs/reloc_64_relative.so",
+        [],
+        (),
+        TestContextObject::new(3),
+        ProgramResult::Ok(ebpf::MM_PROGRAM_START + 0x18),
+    );
+}
+
+#[test]
+fn test_reloc_64_relative_data_sbfv1() {
+    // Tests the correctness of R_BPF_64_RELATIVE relocations in sections other
+    // than .text. The program returns the address of the first .rodata byte.
+    // [ 1] .text PROGBITS 00000000000000e8 0000e8 000020 00 AX 0 0 8
+    // [ 2] .rodata PROGBITS 0000000000000108 000108 000019 01 AMS 0 0 1
+    //
+    // 00000000000001f8 :
+    //       63:       08 01 00 00 00 00 00 00
+    test_interpreter_and_jit_elf!(
+        "tests/elfs/reloc_64_relative_data_sbpfv1.so",
+        [],
+        (),
+        TestContextObject::new(3),
+        ProgramResult::Ok(ebpf::MM_PROGRAM_START + 0x108),
+    );
+}
+
+#[test]
+fn test_reloc_64_relative_data() {
+    // Same as test_reloc_64_relative_data_sbfv1, but with rodata already placed
+    // within MM_PROGRAM_START by the linker
+    // [ 1] .text PROGBITS 0000000100000000 001000 000020 00 AX 0 0 8
+    // [ 2] .rodata PROGBITS 0000000100000020 001020 000019 01 AMS 0 0 1
+    //
+    // 0000000100000110 :
+    //       536870946:       20 00 00 00 01 00 00 00
+    test_interpreter_and_jit_elf!(
+        "tests/elfs/reloc_64_relative_data.so",
+        [],
+        (),
+        TestContextObject::new(4),
+        ProgramResult::Ok(ebpf::MM_PROGRAM_START + 0x20),
+    );
+}
+
+#[test]
+fn test_reloc_64_relative_data_sbpfv1() {
+    // Before https://github.com/solana-labs/llvm-project/pull/35, we used to
+    // generate invalid R_BPF_64_RELATIVE relocations in sections other than
+    // .text.
+    //
+    // This test checks that the old behaviour is maintained for backwards
+    // compatibility when dealing with non-sbfv2 files. See also Elf::relocate().
+    //
+    // The program returns the address of the first .rodata byte.
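+    //
+    // Worked example of the expected value (derived from the section headers
+    // quoted below): .rodata sits at sh_addr 0x108, so the relocated guest
+    // address is ebpf::MM_PROGRAM_START + 0x108, which is exactly what the
+    // assertion checks.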
+ // [ 1] .text PROGBITS 00000000000000e8 0000e8 000020 00 AX 0 0 8 + // [ 2] .rodata PROGBITS 0000000000000108 000108 000019 01 AMS 0 0 1 + // + // 00000000000001f8 : + // 63: 00 00 00 00 08 01 00 00 + test_interpreter_and_jit_elf!( + "tests/elfs/reloc_64_relative_data_sbpfv1.so", + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(ebpf::MM_PROGRAM_START + 0x108), + ); +} + +#[test] +fn test_load_elf_rodata() { + let config = Config { + optimize_rodata: true, + ..Config::default() + }; + test_interpreter_and_jit_elf!( + "tests/elfs/rodata_section.so", + config, + [], + (), + TestContextObject::new(4), + ProgramResult::Ok(42), + ); +} + +#[test] +fn test_load_elf_rodata_sbpfv1() { + let config = Config { + optimize_rodata: false, + ..Config::default() + }; + test_interpreter_and_jit_elf!( + "tests/elfs/rodata_section_sbpfv1.so", + config, + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(42), + ); +} + +// Programs + +#[test] +fn test_lmul_loop() { + test_interpreter_and_jit_asm!( + " + mov r0, 0x7 + add r1, 0xa + lsh r1, 0x20 + rsh r1, 0x20 + jeq r1, 0x0, +4 + mov r0, 0x7 + lmul r0, 0x7 + add r1, -1 + jne r1, 0x0, -3 + exit", + [], + (), + TestContextObject::new(37), + ProgramResult::Ok(0x75db9c97), + ); +} + +#[test] +fn test_prime() { + test_interpreter_and_jit_asm!( + " + mov r1, 67 + mov r0, 0x1 + mov r2, 0x2 + jgt r1, 0x2, +4 + ja +10 + add r2, 0x1 + mov r0, 0x1 + jge r2, r1, +7 + mov r3, r1 + udiv r3, r2 + lmul r3, r2 + mov r4, r1 + sub r4, r3 + mov r0, 0x0 + jne r4, 0x0, -10 + exit", + [], + (), + TestContextObject::new(655), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_subnet() { + test_interpreter_and_jit_asm!( + " + mov r2, 0xe + ldxh r3, [r1+12] + jne r3, 0x81, +2 + mov r2, 0x12 + ldxh r3, [r1+16] + and r3, 0xffff + jne r3, 0x8, +5 + add r1, r2 + mov r0, 0x1 + ldxw r1, [r1+16] + and r1, 0xffffff + jeq r1, 0x1a8c0, +1 + mov r0, 0x0 + exit", + [ + 0x00, 0x00, 0xc0, 0x9f, 0xa0, 0x97, 0x00, 0xa0, // + 0xcc, 0x3b, 0xbf, 0xfa, 0x08, 0x00, 0x45, 0x10, // + 0x00, 0x3c, 0x46, 0x3c, 0x40, 0x00, 0x40, 0x06, // + 0x73, 0x1c, 0xc0, 0xa8, 0x01, 0x02, 0xc0, 0xa8, // + 0x01, 0x01, 0x06, 0x0e, 0x00, 0x17, 0x99, 0xc5, // + 0xa0, 0xec, 0x00, 0x00, 0x00, 0x00, 0xa0, 0x02, // + 0x7d, 0x78, 0xe0, 0xa3, 0x00, 0x00, 0x02, 0x04, // + 0x05, 0xb4, 0x04, 0x02, 0x08, 0x0a, 0x00, 0x9c, // + 0x27, 0x24, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03, // + 0x03, 0x00, // + ], + (), + TestContextObject::new(11), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_tcp_port80_match() { + test_interpreter_and_jit_asm!( + PROG_TCP_PORT_80, + [ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x00, 0x06, // + 0x07, 0x08, 0x09, 0x0a, 0x08, 0x00, 0x45, 0x00, // + 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, // + 0xf9, 0x4d, 0xc0, 0xa8, 0x00, 0x01, 0xc0, 0xa8, // + 0x00, 0x02, 0x27, 0x10, 0x00, 0x50, 0x00, 0x00, // + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, // + 0x20, 0x00, 0xc5, 0x18, 0x00, 0x00, 0x44, 0x44, // + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, // + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, // + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, // + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, // + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, // + 0x44, 0x44, 0x44, 0x44, // + ], + (), + TestContextObject::new(17), + ProgramResult::Ok(0x1), + ); +} + +#[test] +fn test_tcp_port80_nomatch() { + test_interpreter_and_jit_asm!( + PROG_TCP_PORT_80, + [ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x00, 0x06, // + 0x07, 0x08, 0x09, 0x0a, 0x08, 0x00, 0x45, 0x00, // + 0x00, 0x56, 0x00, 
0x01, 0x00, 0x00, 0x40, 0x06, //
+            0xf9, 0x4d, 0xc0, 0xa8, 0x00, 0x01, 0xc0, 0xa8, //
+            0x00, 0x02, 0x00, 0x16, 0x27, 0x10, 0x00, 0x00, //
+            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, 0x02, //
+            0x20, 0x00, 0xc5, 0x18, 0x00, 0x00, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, //
+        ],
+        (),
+        TestContextObject::new(18),
+        ProgramResult::Ok(0x0),
+    );
+}
+
+#[test]
+fn test_tcp_port80_nomatch_ethertype() {
+    test_interpreter_and_jit_asm!(
+        PROG_TCP_PORT_80,
+        [
+            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x00, 0x06, //
+            0x07, 0x08, 0x09, 0x0a, 0x08, 0x01, 0x45, 0x00, //
+            0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, //
+            0xf9, 0x4d, 0xc0, 0xa8, 0x00, 0x01, 0xc0, 0xa8, //
+            0x00, 0x02, 0x27, 0x10, 0x00, 0x50, 0x00, 0x00, //
+            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, //
+            0x20, 0x00, 0xc5, 0x18, 0x00, 0x00, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, //
+        ],
+        (),
+        TestContextObject::new(7),
+        ProgramResult::Ok(0x0),
+    );
+}
+
+#[test]
+fn test_tcp_port80_nomatch_proto() {
+    test_interpreter_and_jit_asm!(
+        PROG_TCP_PORT_80,
+        [
+            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x00, 0x06, //
+            0x07, 0x08, 0x09, 0x0a, 0x08, 0x00, 0x45, 0x00, //
+            0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, //
+            0xf9, 0x4d, 0xc0, 0xa8, 0x00, 0x01, 0xc0, 0xa8, //
+            0x00, 0x02, 0x27, 0x10, 0x00, 0x50, 0x00, 0x00, //
+            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, //
+            0x20, 0x00, 0xc5, 0x18, 0x00, 0x00, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, //
+            0x44, 0x44, 0x44, 0x44, //
+        ],
+        (),
+        TestContextObject::new(9),
+        ProgramResult::Ok(0x0),
+    );
+}
+
+#[test]
+fn test_tcp_sack_match() {
+    test_interpreter_and_jit_asm!(
+        TCP_SACK_ASM,
+        TCP_SACK_MATCH,
+        (),
+        TestContextObject::new(79),
+        ProgramResult::Ok(0x1),
+    );
+}
+
+#[test]
+fn test_tcp_sack_nomatch() {
+    test_interpreter_and_jit_asm!(
+        TCP_SACK_ASM,
+        TCP_SACK_NOMATCH,
+        (),
+        TestContextObject::new(55),
+        ProgramResult::Ok(0x0),
+    );
+}
+
+#[test]
+fn test_struct_func_pointer() {
+    // This test checks that a struct field adjacent to another field
+    // which is a relocatable function pointer is not overwritten when
+    // the function pointer is relocated at load time.
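+    //
+    // Roughly, the guest program has this shape (a hypothetical sketch; the
+    // real source is tests/elfs/struct_func_pointer.rs in this patch):
+    //   struct S { fn_ptr: fn() -> u64, payload: u64 }
+    //   static VAL: S = S { fn_ptr: some_fn, payload: 0x0102030405060708 };
+    // The entrypoint returns VAL.payload, which must still read
+    // 0x0102030405060708 after fn_ptr has been patched.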
+    test_interpreter_and_jit_elf!(
+        "tests/elfs/struct_func_pointer.so",
+        [],
+        (),
+        TestContextObject::new(3),
+        ProgramResult::Ok(0x102030405060708),
+    );
+}
+
+// Fuzzy
+
+#[cfg(all(not(windows), target_arch = "x86_64"))]
+fn execute_generated_program(prog: &[u8]) -> bool {
+    let max_instruction_count = 1024;
+    let mem_size = 1024 * 1024;
+    let executable = Executable::<TestContextObject>::from_text_bytes(
+        prog,
+        Arc::new(BuiltinProgram::new_loader(
+            Config {
+                enable_instruction_tracing: true,
+                ..Config::default()
+            },
+            FunctionRegistry::default(),
+        )),
+        SBPFVersion::V2,
+        FunctionRegistry::default(),
+    );
+    let mut executable = if let Ok(executable) = executable {
+        executable
+    } else {
+        return false;
+    };
+    if executable.verify::<RequisiteVerifier>().is_err() || executable.jit_compile().is_err() {
+        return false;
+    }
+    let (instruction_count_interpreter, tracer_interpreter, result_interpreter) = {
+        let mut mem = vec![0u8; mem_size];
+        let mut context_object = TestContextObject::new(max_instruction_count);
+        let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START);
+        create_vm!(
+            vm,
+            &executable,
+            &mut context_object,
+            stack,
+            heap,
+            vec![mem_region],
+            None
+        );
+        let (instruction_count_interpreter, result_interpreter) =
+            vm.execute_program(&executable, true);
+        let tracer_interpreter = vm.context_object_pointer.clone();
+        (
+            instruction_count_interpreter,
+            tracer_interpreter,
+            result_interpreter,
+        )
+    };
+    let mut mem = vec![0u8; mem_size];
+    let mut context_object = TestContextObject::new(max_instruction_count);
+    let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START);
+    create_vm!(
+        vm,
+        &executable,
+        &mut context_object,
+        stack,
+        heap,
+        vec![mem_region],
+        None
+    );
+    let (instruction_count_jit, result_jit) = vm.execute_program(&executable, false);
+    let tracer_jit = &vm.context_object_pointer;
+    if format!("{result_interpreter:?}") != format!("{result_jit:?}")
+        || !TestContextObject::compare_trace_log(&tracer_interpreter, tracer_jit)
+    {
+        let analysis =
+            solana_rbpf::static_analysis::Analysis::from_executable(&executable).unwrap();
+        println!("result_interpreter={result_interpreter:?}");
+        println!("result_jit={result_jit:?}");
+        let stdout = std::io::stdout();
+        analysis
+            .disassemble_trace_log(&mut stdout.lock(), &tracer_interpreter.trace_log)
+            .unwrap();
+        analysis
+            .disassemble_trace_log(&mut stdout.lock(), &tracer_jit.trace_log)
+            .unwrap();
+        panic!();
+    }
+    if executable.get_config().enable_instruction_meter {
+        assert_eq!(instruction_count_interpreter, instruction_count_jit);
+    }
+    true
+}
+
+#[cfg(all(not(windows), target_arch = "x86_64"))]
+#[test]
+fn test_total_chaos() {
+    let instruction_count = 6;
+    let iteration_count = 1000000;
+    let mut program = vec![0; instruction_count * ebpf::INSN_SIZE];
+    program[ebpf::INSN_SIZE * (instruction_count - 1)..ebpf::INSN_SIZE * instruction_count]
+        .copy_from_slice(&[ebpf::EXIT, 0, 0, 0, 0, 0, 0, 0]);
+    let seed = 0xC2DB2F8F282284A0;
+    let mut prng = SmallRng::seed_from_u64(seed);
+    for _ in 0..iteration_count {
+        prng.fill_bytes(&mut program[0..ebpf::INSN_SIZE * (instruction_count - 1)]);
+        execute_generated_program(&program);
+    }
+    for _ in 0..iteration_count {
+        prng.fill_bytes(&mut program[0..ebpf::INSN_SIZE * (instruction_count - 1)]);
+        for index in (0..program.len()).step_by(ebpf::INSN_SIZE) {
+            program[index + 0x1] &= 0x77;
+            program[index + 0x2] &= 0x00;
+            program[index + 0x3] &= 0x77;
+            program[index + 0x4] &= 0x00;
+            program[index + 0x5] &= 0x77;
+            program[index + 0x6] &= 0x77;
+
program[index + 0x7] &= 0x77; + } + execute_generated_program(&program); + } +} + +// SBPFv1 only [DEPRECATED] + +#[test] +fn test_err_fixed_stack_out_of_bound() { + let config = Config { + enable_sbpf_v2: false, + max_call_depth: 3, + ..Config::default() + }; + test_interpreter_and_jit_asm!( + " + stb [r10-0x4000], 0 + exit", + config, + [], + (), + TestContextObject::new(1), + ProgramResult::Err(EbpfError::AccessViolation( + AccessType::Store, + 0x1FFFFD000, + 1, + "program" + )), + ); +} + +#[test] +fn test_lddw() { + let config = Config { + enable_sbpf_v2: false, + ..Config::default() + }; + test_interpreter_and_jit_asm!( + " + lddw r0, 0x1122334455667788 + exit", + config, + [], + (), + TestContextObject::new(2), + ProgramResult::Ok(0x1122334455667788), + ); + test_interpreter_and_jit_asm!( + " + lddw r0, 0x0000000080000000 + exit", + config, + [], + (), + TestContextObject::new(2), + ProgramResult::Ok(0x80000000), + ); + test_interpreter_and_jit_asm!( + " + mov r0, 0 + mov r1, 0 + mov r2, 0 + lddw r0, 0x1 + ja +2 + lddw r1, 0x1 + lddw r2, 0x1 + add r1, r2 + add r0, r1 + exit + ", + config, + [], + (), + TestContextObject::new(9), + ProgramResult::Ok(0x2), + ); + test_interpreter_and_jit_asm!( + " + mov64 r8, 0x1 + lsh64 r8, 0x20 + or64 r8, 0x28 + callx r8 + lddw r0, 0x1122334455667788 + exit", + config, + [], + (), + TestContextObject::new(4), + ProgramResult::Err(EbpfError::ExceededMaxInstructions), + ); + test_interpreter_and_jit_asm!( + " + mov64 r8, 0x1 + lsh64 r8, 0x20 + or64 r8, 0x28 + callx r8 + lddw r0, 0x1122334455667788 + exit", + config, + [], + (), + TestContextObject::new(5), + ProgramResult::Err(EbpfError::UnsupportedInstruction), + ); + test_interpreter_and_jit_asm!( + " + mov64 r1, 0x1 + lsh64 r1, 0x20 + or64 r1, 0x38 + callx r1 + mov r0, r0 + mov r0, r0 + lddw r0, 0x1122334455667788 + exit + ", + config, + [], + (), + TestContextObject::new(5), + ProgramResult::Err(EbpfError::UnsupportedInstruction), + ); + test_interpreter_and_jit_asm!( + " + lddw r1, 0x100000038 + callx r1 + mov r0, r0 + mov r0, r0 + exit + lddw r0, 0x1122334455667788 + exit + ", + config, + [], + (), + TestContextObject::new(3), + ProgramResult::Err(EbpfError::UnsupportedInstruction), + ); + test_interpreter_and_jit_asm!( + " + mov r0, 0 + lddw r1, 0x1 + mov r2, 0 + exit + ", + config, + [], + (), + TestContextObject::new(2), + ProgramResult::Err(EbpfError::ExceededMaxInstructions), + ); +} + +#[test] +fn test_le() { + let config = Config { + enable_sbpf_v2: false, + ..Config::default() + }; + test_interpreter_and_jit_asm!( + " + ldxh r0, [r1] + le16 r0 + exit", + config, + [0x22, 0x11], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x1122), + ); + test_interpreter_and_jit_asm!( + " + ldxdw r0, [r1] + le16 r0 + exit", + config, + [0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x2211), + ); + test_interpreter_and_jit_asm!( + " + ldxw r0, [r1] + le32 r0 + exit", + config, + [0x44, 0x33, 0x22, 0x11], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x11223344), + ); + test_interpreter_and_jit_asm!( + " + ldxdw r0, [r1] + le32 r0 + exit", + config, + [0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x44332211), + ); + test_interpreter_and_jit_asm!( + " + ldxdw r0, [r1] + le64 r0 + exit", + config, + [0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x1122334455667788), + ); +} + +#[test] +fn test_neg() { + let config = 
Config { + enable_sbpf_v2: false, + ..Config::default() + }; + test_interpreter_and_jit_asm!( + " + mov32 r0, 2 + neg32 r0 + exit", + config, + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(0xfffffffe), + ); + test_interpreter_and_jit_asm!( + " + mov r0, 2 + neg r0 + exit", + config, + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(0xfffffffffffffffe), + ); + test_interpreter_and_jit_asm!( + " + mov32 r0, 3 + sub32 r0, 1 + exit", + config, + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(2), + ); + test_interpreter_and_jit_asm!( + " + mov r0, 3 + sub r0, 1 + exit", + config, + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(2), + ); +} + +#[test] +fn test_callx_imm() { + let config = Config { + enable_sbpf_v2: false, + ..Config::default() + }; + test_interpreter_and_jit_asm!( + " + mov64 r0, 0x0 + mov64 r8, 0x1 + lsh64 r8, 0x20 + or64 r8, 0x30 + callx r8 + exit + function_foo: + mov64 r0, 0x2A + exit", + config, + [], + (), + TestContextObject::new(8), + ProgramResult::Ok(42), + ); +} + +#[test] +fn test_mul() { + let config = Config { + enable_sbpf_v2: false, + ..Config::default() + }; + test_interpreter_and_jit_asm!( + " + mov r0, 3 + mul32 r0, 4 + exit", + config, + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(0xc), + ); + test_interpreter_and_jit_asm!( + " + mov r0, 3 + mov r1, 4 + mul32 r0, r1 + exit", + config, + [], + (), + TestContextObject::new(4), + ProgramResult::Ok(0xc), + ); + test_interpreter_and_jit_asm!( + " + mov r0, 0x40000001 + mov r1, 4 + mul32 r0, r1 + exit", + config, + [], + (), + TestContextObject::new(4), + ProgramResult::Ok(0x4), + ); + test_interpreter_and_jit_asm!( + " + mov r0, 0x40000001 + mul r0, 4 + exit", + config, + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x100000004), + ); + test_interpreter_and_jit_asm!( + " + mov r0, 0x40000001 + mov r1, 4 + mul r0, r1 + exit", + config, + [], + (), + TestContextObject::new(4), + ProgramResult::Ok(0x100000004), + ); + test_interpreter_and_jit_asm!( + " + mov r0, -1 + mul32 r0, 4 + exit", + config, + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(0xFFFFFFFFFFFFFFFC), + ); +} + +#[test] +fn test_div() { + let config = Config { + enable_sbpf_v2: false, + ..Config::default() + }; + test_interpreter_and_jit_asm!( + " + mov r0, 12 + lddw r1, 0x100000004 + div32 r0, r1 + exit", + config, + [], + (), + TestContextObject::new(4), + ProgramResult::Ok(0x3), + ); + test_interpreter_and_jit_asm!( + " + lddw r0, 0x10000000c + div32 r0, 4 + exit", + config, + [], + (), + TestContextObject::new(3), + ProgramResult::Ok(0x3), + ); + test_interpreter_and_jit_asm!( + " + lddw r0, 0x10000000c + mov r1, 4 + div32 r0, r1 + exit", + config, + [], + (), + TestContextObject::new(4), + ProgramResult::Ok(0x3), + ); + test_interpreter_and_jit_asm!( + " + mov r0, 0xc + lsh r0, 32 + div r0, 4 + exit", + config, + [], + (), + TestContextObject::new(4), + ProgramResult::Ok(0x300000000), + ); + test_interpreter_and_jit_asm!( + " + mov r0, 0xc + lsh r0, 32 + mov r1, 4 + div r0, r1 + exit", + config, + [], + (), + TestContextObject::new(5), + ProgramResult::Ok(0x300000000), + ); + test_interpreter_and_jit_asm!( + " + mov32 r0, 1 + mov32 r1, 0 + div r0, r1 + exit", + config, + [], + (), + TestContextObject::new(3), + ProgramResult::Err(EbpfError::DivideByZero), + ); + test_interpreter_and_jit_asm!( + " + mov32 r0, 1 + mov32 r1, 0 + div32 r0, r1 + exit", + config, + [], + (), + TestContextObject::new(3), + ProgramResult::Err(EbpfError::DivideByZero), + ); +} + 
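+// A quick sanity check of the first case in test_mod below:
+// 5748 % 92 = 44, then 44 % 13 = 5, hence the expected result 0x5.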
+
+#[test]
+fn test_mod() {
+    let config = Config {
+        enable_sbpf_v2: false,
+        ..Config::default()
+    };
+    test_interpreter_and_jit_asm!(
+        "
+        mov32 r0, 5748
+        mod32 r0, 92
+        mov32 r1, 13
+        mod32 r0, r1
+        exit",
+        config,
+        [],
+        (),
+        TestContextObject::new(5),
+        ProgramResult::Ok(0x5),
+    );
+    test_interpreter_and_jit_asm!(
+        "
+        lddw r0, 0x100000003
+        mod32 r0, 3
+        exit",
+        config,
+        [],
+        (),
+        TestContextObject::new(3),
+        ProgramResult::Ok(0x0),
+    );
+    test_interpreter_and_jit_asm!(
+        "
+        mov32 r0, -1316649930
+        lsh r0, 32
+        or r0, 0x100dc5c8
+        mov32 r1, 0xdde263e
+        lsh r1, 32
+        or r1, 0x3cbef7f3
+        mod r0, r1
+        mod r0, 0x658f1778
+        exit",
+        config,
+        [],
+        (),
+        TestContextObject::new(9),
+        ProgramResult::Ok(0x30ba5a04),
+    );
+    test_interpreter_and_jit_asm!(
+        "
+        mov32 r0, 1
+        mov32 r1, 0
+        mod r0, r1
+        exit",
+        config,
+        [],
+        (),
+        TestContextObject::new(3),
+        ProgramResult::Err(EbpfError::DivideByZero),
+    );
+    test_interpreter_and_jit_asm!(
+        "
+        mov32 r0, 1
+        mov32 r1, 0
+        mod32 r0, r1
+        exit",
+        config,
+        [],
+        (),
+        TestContextObject::new(3),
+        ProgramResult::Err(EbpfError::DivideByZero),
+    );
+}
diff --git a/rbpf/tests/verifier.rs b/rbpf/tests/verifier.rs
new file mode 100644
index 00000000000000..9146f7982b2077
--- /dev/null
+++ b/rbpf/tests/verifier.rs
@@ -0,0 +1,384 @@
+// Converted from the tests for uBPF <https://github.com/iovisor/ubpf>
+// Copyright 2015 Big Switch Networks, Inc
+// Copyright 2016 6WIND S.A.
+//
+// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
+// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+// The tests contained in this file are extracted from the unit tests of uBPF software. Each test
+// in this file has a name in the form `test_verifier_<name>`, and corresponds to the
+// (human-readable) code in `ubpf/tree/master/tests/<name>`, available at
+// <https://github.com/iovisor/ubpf/tree/master/tests> (hyphens had to be replaced with underscores
+// as Rust will not accept them in function names). It is strongly advised to refer to the uBPF
+// version to understand what these programs do.
+//
+// Each program was assembled from the uBPF version with the assembler provided by uBPF itself, and
+// available at <https://github.com/iovisor/ubpf/tree/master/ubpf>.
+// The very few modifications that have been made are indicated.
+
+// These are unit tests for the eBPF “verifier”.
+ +extern crate solana_rbpf; +extern crate thiserror; + +use solana_rbpf::{ + assembler::assemble, + ebpf, + elf::Executable, + program::{BuiltinProgram, FunctionRegistry, SBPFVersion}, + verifier::{RequisiteVerifier, Verifier, VerifierError}, + vm::{Config, TestContextObject}, +}; +use std::sync::Arc; +use test_utils::{assert_error, create_vm}; +use thiserror::Error; + +/// Error definitions +#[derive(Debug, Error)] +pub enum VerifierTestError { + #[error("{0}")] + Rejected(String), +} + +struct TautologyVerifier {} +impl Verifier for TautologyVerifier { + fn verify( + _prog: &[u8], + _config: &Config, + _sbpf_version: &SBPFVersion, + _function_registry: &FunctionRegistry, + ) -> std::result::Result<(), VerifierError> { + Ok(()) + } +} + +struct ContradictionVerifier {} +impl Verifier for ContradictionVerifier { + fn verify( + _prog: &[u8], + _config: &Config, + _sbpf_version: &SBPFVersion, + _function_registry: &FunctionRegistry, + ) -> std::result::Result<(), VerifierError> { + Err(VerifierError::NoProgram) + } +} + +#[test] +fn test_verifier_success() { + let executable = assemble::( + " + mov32 r0, 0xBEE + exit", + Arc::new(BuiltinProgram::new_mock()), + ) + .unwrap(); + executable.verify::().unwrap(); + create_vm!( + _vm, + &executable, + &mut TestContextObject::default(), + stack, + heap, + Vec::new(), + None + ); +} + +#[test] +#[should_panic(expected = "NoProgram")] +fn test_verifier_fail() { + let executable = assemble::( + " + mov32 r0, 0xBEE + exit", + Arc::new(BuiltinProgram::new_mock()), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +#[should_panic(expected = "DivisionByZero(1)")] +fn test_verifier_err_div_by_zero_imm() { + let executable = assemble::( + " + mov32 r0, 1 + udiv32 r0, 0 + exit", + Arc::new(BuiltinProgram::new_mock()), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +#[should_panic(expected = "UnsupportedLEBEArgument(0)")] +fn test_verifier_err_endian_size() { + let prog = &[ + 0xdc, 0x01, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, // + 0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + ]; + let executable = Executable::::from_text_bytes( + prog, + Arc::new(BuiltinProgram::new_mock()), + SBPFVersion::V2, + FunctionRegistry::default(), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +#[should_panic(expected = "IncompleteLDDW(0)")] +fn test_verifier_err_incomplete_lddw() { + // Note: ubpf has test-err-incomplete-lddw2, which is the same + let prog = &[ + 0x18, 0x00, 0x00, 0x00, 0x88, 0x77, 0x66, 0x55, // + 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + ]; + let executable = Executable::::from_text_bytes( + prog, + Arc::new(BuiltinProgram::new_mock()), + SBPFVersion::V1, + FunctionRegistry::default(), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +fn test_verifier_err_invalid_reg_dst() { + // r11 is disabled when sbpf_version.dynamic_stack_frames()=false, and only sub and add are + // allowed when sbpf_version.dynamic_stack_frames()=true + for enable_sbpf_v2 in [false, true] { + let executable = assemble::( + " + mov r11, 1 + exit", + Arc::new(BuiltinProgram::new_loader( + Config { + enable_sbpf_v2, + ..Config::default() + }, + FunctionRegistry::default(), + )), + ) + .unwrap(); + let result = executable.verify::(); + assert_error!(result, "VerifierError(InvalidDestinationRegister(0))"); + } +} + +#[test] +fn test_verifier_err_invalid_reg_src() { + // r11 is disabled when sbpf_version.dynamic_stack_frames()=false, and only sub and add are + 
// allowed when sbpf_version.dynamic_stack_frames()=true + for enable_sbpf_v2 in [false, true] { + let executable = assemble::( + " + mov r0, r11 + exit", + Arc::new(BuiltinProgram::new_loader( + Config { + enable_sbpf_v2, + ..Config::default() + }, + FunctionRegistry::default(), + )), + ) + .unwrap(); + let result = executable.verify::(); + assert_error!(result, "VerifierError(InvalidSourceRegister(0))"); + } +} + +#[test] +fn test_verifier_resize_stack_ptr_success() { + let executable = assemble::( + " + add r11, -1 + add r11, 1 + exit", + Arc::new(BuiltinProgram::new_loader( + Config { + enable_stack_frame_gaps: false, + ..Config::default() + }, + FunctionRegistry::default(), + )), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +#[should_panic(expected = "JumpToMiddleOfLDDW(2, 0)")] +fn test_verifier_err_jmp_lddw() { + let executable = assemble::( + " + ja +1 + lddw r0, 0x1122334455667788 + exit", + Arc::new(BuiltinProgram::new_mock()), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +#[should_panic(expected = "InvalidFunction(1)")] +fn test_verifier_err_call_lddw() { + let executable = assemble::( + " + call 1 + lddw r0, 0x1122334455667788 + exit", + Arc::new(BuiltinProgram::new_mock()), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +#[should_panic(expected = "InvalidFunction(0)")] +fn test_verifier_err_function_fallthrough() { + let executable = assemble::( + " + mov r0, r1 + function_foo: + exit", + Arc::new(BuiltinProgram::new_mock()), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +#[should_panic(expected = "JumpOutOfCode(3, 0)")] +fn test_verifier_err_jmp_out() { + let executable = assemble::( + " + ja +2 + exit", + Arc::new(BuiltinProgram::new_mock()), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +#[should_panic(expected = "JumpOutOfCode(18446744073709551615, 0)")] +fn test_verifier_err_jmp_out_start() { + let executable = assemble::( + " + ja -2 + exit", + Arc::new(BuiltinProgram::new_mock()), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +#[should_panic(expected = "UnknownOpCode(6, 0)")] +fn test_verifier_err_unknown_opcode() { + let prog = &[ + 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + ]; + let executable = Executable::::from_text_bytes( + prog, + Arc::new(BuiltinProgram::new_mock()), + SBPFVersion::V2, + FunctionRegistry::default(), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +#[should_panic(expected = "CannotWriteR10(0)")] +fn test_verifier_err_write_r10() { + let executable = assemble::( + " + mov r10, 1 + exit", + Arc::new(BuiltinProgram::new_mock()), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +fn test_verifier_err_all_shift_overflows() { + let testcases = [ + // lsh32_imm + ("lsh32 r0, 16", Ok(())), + ("lsh32 r0, 32", Err("ShiftWithOverflow(32, 32, 0)")), + ("lsh32 r0, 64", Err("ShiftWithOverflow(64, 32, 0)")), + // rsh32_imm + ("rsh32 r0, 16", Ok(())), + ("rsh32 r0, 32", Err("ShiftWithOverflow(32, 32, 0)")), + ("rsh32 r0, 64", Err("ShiftWithOverflow(64, 32, 0)")), + // arsh32_imm + ("arsh32 r0, 16", Ok(())), + ("arsh32 r0, 32", Err("ShiftWithOverflow(32, 32, 0)")), + ("arsh32 r0, 64", Err("ShiftWithOverflow(64, 32, 0)")), + // lsh64_imm + ("lsh64 r0, 32", Ok(())), + ("lsh64 r0, 64", Err("ShiftWithOverflow(64, 64, 0)")), + // rsh64_imm + ("rsh64 r0, 32", Ok(())), + ("rsh64 r0, 64", Err("ShiftWithOverflow(64, 64, 0)")), + // arsh64_imm + ("arsh64 r0, 32", 
Ok(())), + ("arsh64 r0, 64", Err("ShiftWithOverflow(64, 64, 0)")), + ]; + + for (overflowing_instruction, expected) in testcases { + let assembly = format!("\n{overflowing_instruction}\nexit"); + let executable = + assemble::(&assembly, Arc::new(BuiltinProgram::new_mock())).unwrap(); + let result = executable.verify::(); + match expected { + Ok(()) => assert!(result.is_ok()), + Err(overflow_msg) => assert_error!(result, "VerifierError({overflow_msg})"), + } + } +} + +#[test] +fn test_sdiv_disabled() { + let instructions = [ + (ebpf::SDIV32_IMM, "sdiv32 r0, 2"), + (ebpf::SDIV32_REG, "sdiv32 r0, r1"), + (ebpf::SDIV64_IMM, "sdiv64 r0, 4"), + (ebpf::SDIV64_REG, "sdiv64 r0, r1"), + ]; + + for (opc, instruction) in instructions { + for enable_sbpf_v2 in [true, false] { + let assembly = format!("\n{instruction}\nexit"); + let executable = assemble::( + &assembly, + Arc::new(BuiltinProgram::new_loader( + Config { + enable_sbpf_v2, + ..Config::default() + }, + FunctionRegistry::default(), + )), + ) + .unwrap(); + let result = executable.verify::(); + if enable_sbpf_v2 { + assert!(result.is_ok()); + } else { + assert_error!(result, "VerifierError(UnknownOpCode({}, {}))", opc, 0); + } + } + } +}
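+
+// A reference sketch of the signed division that `sdiv` enables under SBPFv2.
+// Illustrative only: `sdiv64_reference` is a hypothetical helper added for
+// exposition, not part of the VM or the upstream test suite. Unlike the
+// unsigned `div` family, the operands are reinterpreted as signed before
+// dividing.
+#[allow(dead_code)]
+fn sdiv64_reference(dst: u64, src: u64) -> Option<u64> {
+    // `None` models a guest fault: a zero divisor and, as an assumption made
+    // for this sketch only, the overflowing case i64::MIN / -1 (the tests
+    // above do not assert the overflow behavior).
+    (dst as i64).checked_div(src as i64).map(|q| q as u64)
+}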