diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 86c1e5e1..c173ef5a 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1201,3 +1201,32 @@ endif() # This must always be last! include(CPack) + +# Fuzz test with AFL++ +if (ENABLE_FUZZ_TEST) + add_executable(parse_sflow_v5_packet_fuzz tests/fuzz/parse_sflow_v5_packet_fuzz.cpp) + target_link_libraries(parse_sflow_v5_packet_fuzz sflow_plugin netflow_plugin example_plugin fastnetmon_logic ${LOG4CPP_LIBRARY_PATH}) + + add_executable(process_netflow_packet_v5_fuzz tests/fuzz/process_netflow_packet_v5_fuzz.cpp) + target_link_libraries(process_netflow_packet_v5_fuzz sflow_plugin netflow_plugin example_plugin fastnetmon_logic ${LOG4CPP_LIBRARY_PATH}) +endif() + +# Fuzz test with AFL++ or clang +if (ENABLE_FUZZ_TEST_LIBFUZZER) + add_executable(parse_sflow_v5_packet_fuzz tests/fuzz/parse_sflow_v5_packet_fuzz_libfuzzer.cpp) + target_link_libraries(parse_sflow_v5_packet_fuzz sflow_plugin netflow_plugin example_plugin fastnetmon_logic ${LOG4CPP_LIBRARY_PATH}) + target_compile_options(parse_sflow_v5_packet_fuzz PRIVATE -fsanitize=fuzzer) + target_link_options(parse_sflow_v5_packet_fuzz PRIVATE -fsanitize=fuzzer) + + add_executable(process_netflow_packet_v5_fuzz tests/fuzz/process_netflow_packet_v5_fuzz_libfuzzer.cpp) + target_link_libraries(process_netflow_packet_v5_fuzz sflow_plugin netflow_plugin example_plugin fastnetmon_logic ${LOG4CPP_LIBRARY_PATH}) + target_compile_options(process_netflow_packet_v5_fuzz PRIVATE -fsanitize=fuzzer) + target_link_options(process_netflow_packet_v5_fuzz PRIVATE -fsanitize=fuzzer) +endif() + +# Changed interface socket to console input (desock) +if (ENABLE_FUZZ_TEST_DESOCK) + target_link_libraries(fastnetmon desock) +endif() + + \ No newline at end of file diff --git a/src/tests/fuzz/README.md b/src/tests/fuzz/README.md new file mode 100644 index 00000000..5bf4989d --- /dev/null +++ b/src/tests/fuzz/README.md @@ -0,0 +1,143 @@ +# Fuzzing + +This section describes the fuzz testing process and the approaches and methods applied, including both successful and unsuccessful attempts. + +## Navigation +-------------------------------- + +- [Docker Image](#docker-image) +- [CMake](#cmake) +- [File Structure](#file-structure) +- [Example of Fuzzing Run](#example-of-fuzzing-run) +- [Other Fuzzing Techniques](#other-fuzzing-techniques) +- [Techniques That Didn't Work](#techniques-that-didnt-work) + +## Docker Image +-------------------------------- + +The script is based on `tests/Dockerfile.ubuntu-24.04-afl++`. +This image builds and installs everything necessary for further testing: + +| Module | Description | +|------------------------------------------|-----------------------------------------------------------| +| [`AFL++`](https://github.com/AFLplusplus) | Fuzzer | +| [`casr`](https://github.com/ispras/casr) | Utility for crash verification, minimization, and clustering. Requires Rust | +| [`desock`](https://github.com/zardus/preeny/) | Utility for replacing system calls. Used in the project to replace the `socket` function, which takes data from the interface, with a function that takes data from the console | + +Sanitizers (ASAN, UBSAN, etc.) are also used in the project for runtime error detection. + +### Build Docker Image +-------------------------------- + +To build the Docker image for testing, run the following command from the root directory `fastnetmon`: +```bash +docker build -f tests/Dockerfile.ubuntu-24.04-afl++ . -t fuzz +``` + +After the build is complete, an `image` named `fuzz` will be created.
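+
+To confirm the image is available and open a shell inside it (the same `docker run` command is used later in the fuzzing example), a quick check is:
+```bash
+docker images fuzz                          # the freshly built image should be listed
+docker run --privileged -it fuzz /bin/bash  # start an interactive container from it
+```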
+ +## CMake +-------------------------------- +A number of options have been added to the source `CMakeLists.txt` file, which allow building separate fuzzing wrappers using different CMake options. +*The options in the table are listed with the `-D` prefix, which lets you pass the option as an argument to the cmake utility on the command line.* + +| Option | Description | +|-----------------------------------|-----------------------------------------------------------| +| `-DENABLE_FUZZ_TEST` | Builds two fuzzing wrappers for `AFL++`. Use **only with the `afl-c++` compiler** or its variations | +| `-DENABLE_FUZZ_TEST_LIBFUZZER` | Builds two fuzzing wrappers for `libfuzzer`. Use **with the `clang` compiler or variations of `afl-c++`** | +| `-DENABLE_FUZZ_TEST_DESOCK` | This option allows modifying the behavior of the standard `socket` function. Now data will come from the input stream instead of the network socket. **Instruments the original `fastnetmon` executable** | +| `-DCMAKE_BUILD_TYPE=Debug` | Debugging option required for proper debugger functionality. **Do not use for release builds or during tests - the sanitizers may report false positives on functions such as `assert()`** | + +## File Structure +-------------------------------- +```bash +fuzz/ +├── README.md +├── README_rus.md +├── fastnetmon.conf +├── parse_sflow_v5_packet_fuzz.cpp +├── parse_sflow_v5_packet_fuzz_libfuzzer.cpp +├── process_netflow_packet_v5_fuzz.cpp +├── process_netflow_packet_v5_fuzz_libfuzzer.cpp +└── scripts/ +    ├── minimize_out.sh +    ├── start_fuzz_conf_file.sh +    └── start_fuzz_harness.sh +``` +### File Descriptions +-------------------------------- + +| File | Description | +|-----------------------------------------|-----------------------------------------------------------------------------------------------| +| `README.md` | Documentation in **English** about the fuzz testing of the project. | +| `README_rus.md` | Documentation in **Russian** about the fuzz testing of the project. | +| `fastnetmon.conf` | Configuration file for FastNetMon. Only the netflow and sflow protocols are left for operation. | +| `parse_sflow_v5_packet_fuzz.cpp` | Wrapper for fuzzing the `parse_sflow_v5_packet` function using `AFL++`. | +| `parse_sflow_v5_packet_fuzz_libfuzzer.cpp` | Wrapper for fuzzing the `parse_sflow_v5_packet` function using `libfuzzer`. | +| `process_netflow_packet_v5_fuzz.cpp` | Wrapper for fuzzing the `process_netflow_packet` function using `AFL++`. | +| `process_netflow_packet_v5_fuzz_libfuzzer.cpp` | Wrapper for fuzzing the `process_netflow_packet` function using `libfuzzer`. | + +| File/Directory | Description | Run | +|----------------------------------------|-------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------| +| `/scripts/` | Directory containing scripts for automating fuzzing. | | +| `/scripts/minimize_out.sh` | Script for minimizing, verifying, and clustering crash outputs. | `./minimize_out.sh <out_dir> <./binary>` | +| `/scripts/start_fuzz_conf_file.sh` | Script for running fuzzing on configuration files. Launches a tmux session with two fuzzer instances. Uses options `./fastnetmon --configuration_check --configuration_file`. | Run from the current directory without additional options. The environment is automatically set up. | +| `/scripts/start_fuzz_harness.sh` | Script for testing binary files compiled from wrappers into separate executables.
Designed for wrappers compiled for `AFL++`. It sets up the environment, creates folders, and launches a tmux session with two fuzzer instances. After fuzzing ends, it runs the `minimize_out.sh` script for crash clustering. | `./start_fuzz_harness.sh <./binary>` The script will stop if no new paths are found within a certain time. By default, the time is 1 hour. To change it, modify the `TIME` variable (in seconds) inside the script. | + + +## Example of Fuzzing Run +-------------------------------- + +Run the container: +```bash +docker run --privileged -it fuzz /bin/bash +``` + +Before running AFL++ (including several instances in parallel), configure the kernel core dump pattern as AFL++ requires: +```bash +echo core | tee /proc/sys/kernel/core_pattern +``` +With the standard `docker image` build, the `build_fuzz` directory will be created, inside which the fuzzing wrappers will be compiled: +- `parse_sflow_v5_packet_fuzz` +- `process_netflow_packet_v5_fuzz` + +To run fuzzing, we use the `start_fuzz_harness` script: + +```bash +/src/tests/fuzz/scripts/start_fuzz_harness.sh ./process_netflow_packet_v5_fuzz +``` +Or +```bash +/src/tests/fuzz/scripts/start_fuzz_harness.sh ./parse_sflow_v5_packet_fuzz +``` +After starting, a working directory named after the target binary (`<binary>_dir`) will be created, containing the `input` and `output` folders. +A `tmux` session will be started with two windows, each running an instance of the `AFL++` fuzzer. +Fuzzing will continue until no new paths are found within one hour (this timeout value can be modified in the script). +After that, the tmux session will end, and crash clustering and verification will begin with the `minimize_out.sh` script. + +## Other Fuzzing Techniques + +### Coarse Code Intervention Using Persistent Mode AFL++ +-------------------------------- + +The `AFL++` persistent mode allows rewriting part of the code under test, significantly increasing fuzzing speed (the program does not terminate after processing one input, but re-runs the loop around the target function many times). + +This approach can be used to instrument two different targets: +- `src/netflow_plugin/netflow_collector.cpp : start_netflow_collector(...)` +- `src/sflow_plugin/sflow_collector.cpp : start_sflow_collector(...)` + +How the instrumentation looks: +1. Add the construct `__AFL_FUZZ_INIT();` before the target function. +2. Replace the `while (true)` loop with `while (__AFL_LOOP(10000))`. +3. Replace `char udp_buffer[udp_buffer_size];` with `unsigned char * udp_buffer = __AFL_FUZZ_TESTCASE_BUF;`. +4. Replace `int received_bytes = recvfrom(sockfd, udp_buffer, udp_buffer_size, 0, (struct sockaddr*)&client_addr, &address_len);` with `int received_bytes = __AFL_FUZZ_TESTCASE_LEN;`. +5. Build with the AFL++ compiler and sanitizers. No wrappers are needed for compilation. +6. Run fuzzing with: `afl-fuzz -i in -o out -- ./fastnetmon` + + +### Techniques That Didn't Work +-------------------------------- + +| Name | Description | +|---------------------|------------------------------------------------------------------------------------------------------| +| `AFLNet` | The characteristics of the protocol (lack of feedback) prevent the use of this fuzzer. | +| `desock` | Code instrumentation is successful, but the fuzzer does not start and cannot collect feedback. I consider this method **promising**, but the fuzzer requires adjustments.
| diff --git a/src/tests/fuzz/README_rus.md b/src/tests/fuzz/README_rus.md new file mode 100644 index 00000000..57d2e4d3 --- /dev/null +++ b/src/tests/fuzz/README_rus.md @@ -0,0 +1,150 @@ +# Fuzzing + +В этом разделе описан процесс фаззинг-тестирования, использованные подходы и методы, включая как успешные, так и неудачные попытки. + +## Навигация +-------------------------------- + +- [Docker Image](#docker-image) +- [CMake](#cmake) +- [Файловая структура](#файловая-структура) +- [Пример запуска фаззинга](#пример-запуска-фаззинга) +- [Другие техники фаззинга](#другие-техники-фаззинга) +- [Техники, которые не дали результата](#техники-которые-не-дали-результата) + +## Docker Image +-------------------------------- + +Скрипт базируется на `tests/Dockerfile.ubuntu-24.04-afl++`. +В этом образе собирается и устанавливается все необходимое для дальнейшего тестирования: + +| Модуль | Описание | +|---------------------------------------------|-------------------------------------------------------------| +| [`AFL++`](https://github.com/AFLplusplus) | Фаззер | +| [`casr`](https://github.com/ispras/casr) | Утилита для проверки, минимизации и кластеризации крашей. Нужен Rust | +| [`desock`](https://github.com/zardus/preeny/) | Утилита для подмены системных вызовов. Используется в проекте для замены вызова функции `socket`, которая принимает данные с интерфейса, на функцию, которая принимает данные из консоли | + +Также в проекте используются санитайзеры (ASAN, UBSAN и др.) для поиска ошибок в динамике. + +### Build Docker Image +-------------------------------- + +Сборка Docker image для тестирования происходит из корневой директории `fastnetmon` и запускается командой: +```bash +docker build -f tests/Dockerfile.ubuntu-24.04-afl++ . -t fuzz +``` +По завершении сборки будет получен `image` с названием `fuzz`. + +## CMake +-------------------------------- +В исходный файл `CMakeLists.txt` был добавлен ряд опций, которые позволяют собирать отдельные фаззинг-обертки, используя различные опции cmake. +*В таблице опции будут указываться с приставкой `-D`, которая позволяет задать опцию как аргумент утилиты cmake при запуске из командной строки* + + +| Опция | Описание | +|------------------------------------|-------------------------------------------------------------| +| `-DENABLE_FUZZ_TEST` | Собирает две фаззинг-обертки под `AFL++`. Использовать **только с компилятором `afl-c++`** или его вариациями | +| `-DENABLE_FUZZ_TEST_LIBFUZZER` | Собирает две фаззинг-обертки под `libfuzzer`. Использовать **с компилятором `clang` или вариациями `afl-c++`** | +| `-DENABLE_FUZZ_TEST_DESOCK` | Данная опция позволяет изменить поведение стандартной функции `socket`. Теперь данные будут браться не с сетевого сокета, а из потока ввода. **Инструментирует оригинальный исполняемый файл `fastnetmon`** | +| `-DCMAKE_BUILD_TYPE=Debug` | Отладочная опция, необходимая для корректной работы отладчиков. Внимание!
**Не использовать на релизе и при тестах - будут ложные срабатывания санитайзера на таких функциях, как `assert()`** | + + + +## Файловая структура +-------------------------------- +``` +fuzz/ +├── README.md +├── README_rus.md +├── fastnetmon.conf +├── parse_sflow_v5_packet_fuzz.cpp +├── parse_sflow_v5_packet_fuzz_libfuzzer.cpp +├── process_netflow_packet_v5_fuzz.cpp +├── process_netflow_packet_v5_fuzz_libfuzzer.cpp +└── scripts/ +│ ├── minimize_out.sh +│ ├── start_fuzz_conf_file.sh +│ └── start_fuzz_harness.sh +``` + +### Описание файлов +-------------------------------- + +| Файл | Описание | +|-----------------------------------------|------------------------------------------------------------------------------------------| +| `README.md` | Документация на **английском языке** о фаззинг-тестировании проекта. | +| `README_rus.md` | Документация на **русском языке** о фаззинг-тестировании проекта. | +| `fastnetmon.conf` | Конфигурационный файл для FastNetMon. Оставлены для работы только протоколы netflow и sflow. | +| `parse_sflow_v5_packet_fuzz.cpp` | Обертка для фаззинга функции `parse_sflow_v5_packet` через `AFL++`. | +| `parse_sflow_v5_packet_fuzz_libfuzzer.cpp` | Обертка для фаззинга функции `parse_sflow_v5_packet` через `libfuzzer`.| +| `process_netflow_packet_v5_fuzz.cpp` | Обертка для фаззинга функции `process_netflow_packet` через `AFL++`. | +| `process_netflow_packet_v5_fuzz_libfuzzer.cpp` | Обертка для фаззинга функции `process_netflow_packet` через `libfuzzer`. | + +| Файл/Директория | Описание | Запуск | +|-----------------------------------------|--------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------| +| `/scripts/` | Директория со скриптами для автоматизации фаззинга. | | +| `/scripts/minimize_out.sh` | Скрипт для минимизации, верификации и кластеризации выходных данных (падений). | `./minimize_out.sh <out_dir> <./binary>` | +| `/scripts/start_fuzz_conf_file.sh` | Скрипт для запуска фаззинга конфигурационных файлов. Запускает сессию tmux с двумя экземплярами фаззера. Использует опции `./fastnetmon --configuration_check --configuration_file`. | Запускать из текущей директории без дополнительных опций. Окружение создаётся автоматически. | +| `/scripts/start_fuzz_harness.sh` | Скрипт для тестирования бинарных файлов, скомпилированных из обёрток в отдельные исполняемые файлы. Предназначен для обёрток, скомпилированных под `AFL++`. При запуске настраивает окружение, создаёт папки, запускает сессию tmux с двумя экземплярами фаззера. После окончания фаззинга запускает скрипт `minimize_out.sh` для кластеризации крашей. | `./start_fuzz_harness.sh <./binary>` Скрипт завершится, если не будет найдено новых путей в течение определённого времени. По умолчанию время равно 1 часу. Для изменения отредактируйте переменную `TIME` (в секундах) внутри скрипта.
| + + + +## Пример запуска фаззинга +-------------------------------- +Запускаем контейнер: +```bash +docker run --privileged -it fuzz /bin/bash +``` + +Перед запуском AFL++ (в том числе нескольких экземпляров параллельно) настроим шаблон core-дампов, как того требует AFL++: +```bash +echo core | tee /proc/sys/kernel/core_pattern +``` + +При стандартной сборке `docker image` у нас будет папка `build_fuzz`, внутри которой будут скомпилированы две фаззинг-обертки: +- `parse_sflow_v5_packet_fuzz` +- `process_netflow_packet_v5_fuzz` + +Для их фаззинга используем скрипт `start_fuzz_harness`: +```bash +/src/tests/fuzz/scripts/start_fuzz_harness.sh ./process_netflow_packet_v5_fuzz +``` +Или +```bash +/src/tests/fuzz/scripts/start_fuzz_harness.sh ./parse_sflow_v5_packet_fuzz +``` + +После запуска будет создана рабочая директория с именем целевого бинарного файла (`<binary>_dir`), внутри которой будут созданы папки `input` и `output`. +Будет запущена сессия `tmux` с двумя вкладками - на каждой будет запущен инстанс фаззера `AFL++`. +Фаззинг будет продолжаться, пока не будет прироста новых путей в течение часа (можно поменять значение внутри скрипта). +Далее сессия tmux будет завершена, начнется кластеризация и проверка падений с помощью скрипта `minimize_out.sh`. + +## Другие техники фаззинга + +### Грубое вмешательство в код с помощью Persistent Mode AFL++ +-------------------------------- +Фаззер `AFL++` позволяет переписать часть кода под фаззинг, попутно кратно увеличив скорость фаззинга (программа не завершается после обработки одного набора данных, а перезапускает цикл с целевой функцией несколько раз). + +Таким способом можно инструментировать две разные цели: +- `src/netflow_plugin/netflow_collector.cpp : start_netflow_collector(...)` +- `src/sflow_plugin/sflow_collector.cpp : start_sflow_collector(...)` + +Как выглядит инструментация: +1. Перед целевой функцией добавить конструкцию `__AFL_FUZZ_INIT();` +2. Цикл `while (true)` заменить на `while (__AFL_LOOP(10000))` +3. `char udp_buffer[udp_buffer_size];` заменить на `unsigned char * udp_buffer = __AFL_FUZZ_TESTCASE_BUF;` +4. `int received_bytes = recvfrom(sockfd, udp_buffer, udp_buffer_size, 0, (struct sockaddr*)&client_addr, &address_len);` заменить на `int received_bytes = __AFL_FUZZ_TESTCASE_LEN;` +5. Собрать с компилятором AFL++ и санитайзерами. Собирать какие-либо обертки не нужно. +6. Запустить фаззинг: `afl-fuzz -i in -o out -- ./fastnetmon` + + +### Техники, которые не дали результата +-------------------------------- +| Название | Описание | +|---------------------|------------------------------------------------------------------------------------------| +| `AFLNet` | Особенности протокола (отсутствие обратной связи) не дают использовать данный фаззер. | +| `desock` | Инструментация кода успешна, но фаззер не запускается - также не может собрать обратную связь. Считаю данный способ **перспективным**, необходима корректировка фаззера.
| + + + + diff --git a/src/tests/fuzz/fastnetmon.conf b/src/tests/fuzz/fastnetmon.conf new file mode 100644 index 00000000..b46bfb28 --- /dev/null +++ b/src/tests/fuzz/fastnetmon.conf @@ -0,0 +1,387 @@ +### +### Main configuration params +### + +### Logging configuration + +# Logging level, can be info or debug +logging_level = info + +# enable this option if you want to send logs to local syslog facility +logging_local_syslog_logging = off + +# enable this option if you want to send logs to a remote syslog server via UDP +logging_remote_syslog_logging = off + +# specify a custom server and port for remote logging +logging_remote_syslog_server = 10.10.10.10 +logging_remote_syslog_port = 514 + +# To make FastNetMon better we need to know how you use it and what's your software and hardware platform. +# To accomplish this FastNetMon sends usage information every 1 hour to our statistics server https://community-stats.fastnetmon.com +# We keep high standards of data protection and you can find our privacy policy here: https://community-stats.fastnetmon.com +# You can find information which is being sent at GitHub: https://github.com/pavel-odintsov/fastnetmon/search?q=send_usage_data_to_reporting_server +# If you prefer to disable this capability you need to set following flag to on +disable_usage_report = off + +# Enable/Disable any actions in case of attack +enable_ban = on + +# Enable ban for IPv6 +enable_ban_ipv6 = on + +# disable processing for certain direction of traffic +process_incoming_traffic = on +process_outgoing_traffic = on + +# dump all traffic to log file +dump_all_traffic = off + +# dump other traffic to log, useful to detect missed prefixes +dump_other_traffic = off + +# How many packets will be collected from attack traffic +ban_details_records_count = 20 + +# How long (in seconds) we should keep an IP in blocked state +# If you set 0 here it completely disables unban capability +ban_time = 1900 + +# Check if the attack is still active, before triggering an unban callback with this option +# If the attack is still active, check each run of the unban watchdog +unban_only_if_attack_finished = on + +# list of all your networks in CIDR format +networks_list_path = /etc/networks_list + +# list networks in CIDR format which will be not monitored for attacks +white_list_path = /etc/networks_whitelist + +# redraw period for client's screen +check_period = 1 + +# Connection tracking is very useful for attack detection because it provides huge amounts of information, +# but it's very CPU intensive and not recommended in big networks +enable_connection_tracking = on + +# Different approaches to attack detection +ban_for_pps = on +ban_for_bandwidth = on +ban_for_flows = off + +# Limits for Dos/DDoS attacks +threshold_pps = 20000 +threshold_mbps = 1000 +threshold_flows = 3500 + +# Per protocol attack thresholds +# We do not implement per protocol flow limits due to flow calculation logic limitations +# These limits should be smaller than global pps/mbps limits + +threshold_tcp_mbps = 100000 +threshold_udp_mbps = 100000 +threshold_icmp_mbps = 100000 + +threshold_tcp_pps = 100000 +threshold_udp_pps = 100000 +threshold_icmp_pps = 100000 + +ban_for_tcp_bandwidth = off +ban_for_udp_bandwidth = off +ban_for_icmp_bandwidth = off + +ban_for_tcp_pps = off +ban_for_udp_pps = off +ban_for_icmp_pps = off + +### +### Traffic capture methods +### + +# +# Default option for port mirror capture on Linux +# AF_PACKET capture engine +mirror_afpacket = off + +# High efficient XDP based traffic capture 
method +# XDP will detach network interface from Linux network stack completely and you may lose connectivity if your route management traffic over same interface +# You need to have separate network card for management interface +mirror_afxdp = off + +# Activates poll based logic to check for new packets. Generally, it eliminates active polling and reduces CPU load +poll_mode_xdp = off + +# Set interface into promisc mode automatically +xdp_set_promisc = on + +# Explicitly enable zero copy mode, requires driver support +zero_copy_xdp = off + +# Forces native XDP mode which requires support from network card +force_native_mode_xdp = off + +# Switch to using IP length as packet length instead of data from capture engine. Must be enabled when traffic is cropped externally +xdp_read_packet_length_from_ip_header = off + +# Path to XDP microcode programm for packet processing +microcode_xdp_path = /etc/xdp_kernel.o + +# You can use this option to multiply all incoming traffc by this value +# It may be useful for sampled mirror ports +mirror_af_packet_custom_sampling_rate = 1 + +# AF_PACKET fanout mode mode, http://man7.org/linux/man-pages/man7/packet.7.html +# Available modes: cpu, lb, hash, random, rollover, queue_mapping +mirror_af_packet_fanout_mode = cpu + +# This option should be enabled if you are using Juniper with mirroring of the first X bytes of packet: maximum-packet-length 110; +af_packet_read_packet_length_from_ip_header = off + +# Netmap traffic capture, only for FreeBSD +mirror_netmap = off + +# Netmap based mirroring sampling ratio +netmap_sampling_ratio = 1 + +# This option should be enabled if you are using Juniper with mirroring of the first X bytes of packet: maximum-packet-length 110; +netmap_read_packet_length_from_ip_header = off + +# Pcap mode, very slow and not recommended for production use +pcap = off + +# Netflow capture method with v5, v9 and IPFIX support +netflow = on + +# sFLOW capture suitable for switches +sflow = on + +# Configuration for Netmap, mirror, pcap, AF_XDP modes +# For pcap we could specify "any" +# For Netmap we could specify multiple interfaces separated by comma +interfaces = ens160 + +# We use average values for traffic speed to certain IP and we calculate average over this time periond (seconds) +average_calculation_time = 5 + +# Delay between traffic recalculation attempts +speed_calculation_delay = 1 + +# Netflow configuration + +# it's possible to specify multiple ports here, using commas as delimiter +netflow_port = 2055 + +# +# Netflow collector host to listen on. +# +# To bind on all interfaces for IPv4 and IPv6 use :: +# To bind only on IPv4 use 0.0.0.0 + +# To bind only on IPv4 use 127.0.0.1 +# +netflow_host = 0.0.0.0 + +# Netflow v9 and IPFIX agents use different and very complex approaches for notifying about sample ratio +# Here you could specify a sampling ratio for all this agents +# For NetFlow v5 we extract sampling ratio from packets directely and this option not used +netflow_sampling_ratio = 1 + +# sFlow configuration + +# It's possible to specify multiple ports here, using commas as delimiter +sflow_port = 6343 +# sflow_port = 6343,6344 +sflow_host = 0.0.0.0 + +# Some vendors may lie about full packet length in sFlow packet. 
To avoid this issue we can switch to using IP packet length from parsed header +sflow_read_packet_length_from_ip_header = off + +### +### Actions when attack detected +### + +# This script executed for ban, unban and attack detail collection +notify_script_path = /usr/local/bin/notify_about_attack.sh + +# collect a full dump of the attack with full payload in pcap compatible format +collect_attack_pcap_dumps = off + +# Save attack details to Redis +redis_enabled = off + +# Redis configuration +redis_port = 6379 +redis_host = 127.0.0.1 + +# specify a custom prefix here +redis_prefix = mydc1 + +# We could store attack information to MongoDB +mongodb_enabled = off +mongodb_host = localhost +mongodb_port = 27017 +mongodb_database_name = fastnetmon + +# Announce blocked IPs with BGP protocol with ExaBGP +exabgp = off +exabgp_command_pipe = /var/run/exabgp.cmd +exabgp_community = 65001:666 + +# specify multiple communities with this syntax: +# exabgp_community = [65001:666 65001:777] + +# specify different communities for host and subnet announces +# exabgp_community_subnet = 65001:667 +# exabgp_community_host = 65001:668 + +exabgp_next_hop = 10.0.3.114 + +# In complex cases you could have both options enabled and announce host and subnet simultaneously + +# Announce /32 host itself with BGP +exabgp_announce_host = on + +# Announce origin subnet of IP address instead IP itself +exabgp_announce_whole_subnet = off + +# GoBGP integration +gobgp = off + +# Configuration for IPv4 announces +gobgp_next_hop = 0.0.0.0 +gobgp_next_hop_host_ipv4 = 0.0.0.0 +gobgp_next_hop_subnet_ipv4 = 0.0.0.0 + +gobgp_announce_host = on +gobgp_announce_whole_subnet = off + +gobgp_community_host = 65001:666 +gobgp_community_subnet = 65001:777 + +# Configuration for IPv6 announces +gobgp_next_hop_ipv6 = 100::1 +gobgp_next_hop_host_ipv6 = 100::1 +gobgp_next_hop_subnet_ipv6 = 100::1 +gobgp_announce_host_ipv6 = on +gobgp_announce_whole_subnet_ipv6 = off + +gobgp_community_host_ipv6 = 65001:666 +gobgp_community_subnet_ipv6 = 65001:777 + +# Before using InfluxDB you need to create database using influx tool: +# create database fastnetmon + +# InfluxDB integration +# More details can be found here: https://fastnetmon.com/docs/influxdb_integration/ +influxdb = off +influxdb_host = 127.0.0.1 +influxdb_port = 8086 +influxdb_database = fastnetmon + +# InfluxDB auth +influxdb_auth = off +influxdb_user = fastnetmon +influxdb_password = secure + +# How often we export metrics to InfluxDB +influxdb_push_period = 1 + +# Clickhouse metrics export + +# Enables metrics export to Clickhouse +clickhouse_metrics = off + +# Clickhosue database name +clickhouse_metrics_database = fastnetmon + +# Clickhouse login +clickhouse_metrics_username = default + +# Clickhouse password +# clickhouse_metrics_password = secure-password + +# Clickhouse host +clickhouse_metrics_host = 127.0.0.1 + +# Clickhouse port +clickhouse_metrics_port = 9000 + +# Clickhouse push period, how often we export metrics to Clickhouse +clickhouse_metrics_push_period = 1 + +# Graphite monitoring +graphite = off +# Please use only IP because domain names are not allowed here +graphite_host = 127.0.0.1 +graphite_port = 2003 + +# Default namespace for Graphite data +graphite_prefix = fastnetmon + +# How often we export metrics to Graphite +graphite_push_period = 1 + +# Add local IP addresses and aliases to monitoring list +# Works only for Linux +monitor_local_ip_addresses = on + +# Add IP addresses for OpenVZ / Virtuozzo VEs to network monitoring list 
+monitor_openvz_vps_ip_addresses = off + +# Create group of hosts with non-standard thresholds +# You should create this group before (in configuration file) specifying any limits +# hostgroup = my_hosts:10.10.10.221/32,10.10.10.222/32 + +# Configure this group +my_hosts_enable_ban = off + +my_hosts_ban_for_pps = off +my_hosts_ban_for_bandwidth = off +my_hosts_ban_for_flows = off + +my_hosts_threshold_pps = 100000 +my_hosts_threshold_mbps = 1000 +my_hosts_threshold_flows = 3500 + +# Path to pid file for checking "if another copy of tool is running", it's useful when you run multiple instances of tool +pid_path = /var/run/fastnetmon.pid + +# Path to file where we store IPv4 traffic information for fastnetmon_client +cli_stats_file_path = /tmp/fastnetmon.dat + +# Path to file where we store IPv6 traffic information for fastnetmon_client +cli_stats_ipv6_file_path = /tmp/fastnetmon_ipv6.dat + +# Enable gRPC API (required for fastnetmon_api_client tool) +enable_api = on + +# Enables traffic export to Kafka +kafka_traffic_export = off + +# Kafka traffic export topic name +kafka_traffic_export_topic = fastnetmon + +# Kafka traffic export format: json or protobuf +kafka_traffic_export_format = json + +# Kafka traffic export list of brokers separated by comma +kafka_traffic_export_brokers = 10.154.0.1:9092,10.154.0.2:9092 + +# Prometheus monitoring endpoint +prometheus = on + +# Prometheus port +prometheus_port = 9209 + +# Prometheus host +prometheus_host = 127.0.0.1 + +### +### Client configuration +### + +# Field used for sorting in client, valid values are: packets, bytes or flows +sort_parameter = packets + +# How much IPs will be listed for incoming and outgoing channel eaters +max_ips_in_list = 7 diff --git a/src/tests/fuzz/parse_sflow_v5_packet_fuzz.cpp b/src/tests/fuzz/parse_sflow_v5_packet_fuzz.cpp new file mode 100644 index 00000000..09fabb42 --- /dev/null +++ b/src/tests/fuzz/parse_sflow_v5_packet_fuzz.cpp @@ -0,0 +1,58 @@ +#include "../../abstract_subnet_counters.hpp" +#include "../../fastnetmon_configuration_scheme.hpp" +#include "../../bgp_protocol_flow_spec.hpp" +#include "../../netflow_plugin/netflow_collector.hpp" +#include "../../sflow_plugin/sflow_collector.hpp" +#include "../../fastnetmon_logic.hpp" + + +log4cpp::Category& logger = log4cpp::Category::getRoot(); +time_t current_inaccurate_time = 0; +fastnetmon_configuration_t fastnetmon_global_configuration; +packet_buckets_storage_t packet_buckets_ipv6_storage; +bool DEBUG_DUMP_ALL_PACKETS = false; +bool DEBUG_DUMP_OTHER_PACKETS = false; +uint64_t total_ipv6_packets = 0; +uint64_t total_ipv4_packets = 0; +patricia_tree_t *lookup_tree_ipv4; +patricia_tree_t *lookup_tree_ipv6; + +uint64_t total_flowspec_whitelist_packets = 0; +uint64_t total_simple_packets_processed = 0; +uint64_t unknown_ip_version_packets = 0; +bool process_incoming_traffic = true; +bool process_outgoing_traffic = true; +bool enable_connection_tracking = true; + +std::vector static_flowspec_based_whitelist; +packet_buckets_storage_t packet_buckets_ipv4_storage; + +total_speed_counters_t total_counters_ipv4; +total_speed_counters_t total_counters_ipv6; + +abstract_subnet_counters_t ipv6_network_counters; +abstract_subnet_counters_t ipv6_host_counters; +abstract_subnet_counters_t ipv4_network_counters; +abstract_subnet_counters_t ipv4_host_counters; + +map_of_vector_counters_for_flow_t SubnetVectorMapFlow; +std::mutex flow_counter_mutex; + +extern process_packet_pointer sflow_process_func_ptr = process_packet; + +__AFL_FUZZ_INIT(); +int main(int argc, char** argv) 
{ + uint32_t client_ipv4_address = 128; + std::string ip_add = "192.168.0.1"; + uint16_t version = 5; + + unsigned char* udp_buffer = __AFL_FUZZ_TESTCASE_BUF; + udp_buffer[0] = (version >> 8) & 0xFF; // Higher byte + udp_buffer[1] = version & 0xFF; + + while (__AFL_LOOP(10000)) { + unsigned int received_bytes = __AFL_FUZZ_TESTCASE_LEN; + parse_sflow_v5_packet( (uint8_t*)udp_buffer, received_bytes, client_ipv4_address); + } + return 0; +} \ No newline at end of file diff --git a/src/tests/fuzz/parse_sflow_v5_packet_fuzz_libfuzzer.cpp b/src/tests/fuzz/parse_sflow_v5_packet_fuzz_libfuzzer.cpp new file mode 100644 index 00000000..22822a39 --- /dev/null +++ b/src/tests/fuzz/parse_sflow_v5_packet_fuzz_libfuzzer.cpp @@ -0,0 +1,49 @@ +#include "../../abstract_subnet_counters.hpp" +#include "../../fastnetmon_configuration_scheme.hpp" +#include "../../bgp_protocol_flow_spec.hpp" +#include "../../netflow_plugin/netflow_collector.hpp" +#include "../../sflow_plugin/sflow_collector.hpp" +#include "../../fastnetmon_logic.hpp" + + +log4cpp::Category& logger = log4cpp::Category::getRoot(); +time_t current_inaccurate_time = 0; +fastnetmon_configuration_t fastnetmon_global_configuration; +packet_buckets_storage_t packet_buckets_ipv6_storage; +bool DEBUG_DUMP_ALL_PACKETS = false; +bool DEBUG_DUMP_OTHER_PACKETS = false; +uint64_t total_ipv6_packets = 0; +uint64_t total_ipv4_packets = 0; +patricia_tree_t *lookup_tree_ipv4; +patricia_tree_t *lookup_tree_ipv6; + +uint64_t total_flowspec_whitelist_packets = 0; +uint64_t total_simple_packets_processed = 0; +uint64_t unknown_ip_version_packets = 0; +bool process_incoming_traffic = true; +bool process_outgoing_traffic = true; +bool enable_connection_tracking = true; + +std::vector static_flowspec_based_whitelist; +packet_buckets_storage_t packet_buckets_ipv4_storage; + +total_speed_counters_t total_counters_ipv4; +total_speed_counters_t total_counters_ipv6; + +abstract_subnet_counters_t ipv6_network_counters; +abstract_subnet_counters_t ipv6_host_counters; +abstract_subnet_counters_t ipv4_network_counters; +abstract_subnet_counters_t ipv4_host_counters; + +map_of_vector_counters_for_flow_t SubnetVectorMapFlow; +std::mutex flow_counter_mutex; + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + if (Size < 2) { + return 0; + } + uint32_t client_ipv4_address = 128; + // Call the sFlow v5 parser, matching the AFL++ wrapper for this target + parse_sflow_v5_packet((uint8_t *)Data, Size, client_ipv4_address); + return 0; +} \ No newline at end of file diff --git a/src/tests/fuzz/process_netflow_packet_v5_fuzz.cpp b/src/tests/fuzz/process_netflow_packet_v5_fuzz.cpp new file mode 100644 index 00000000..77808d7e --- /dev/null +++ b/src/tests/fuzz/process_netflow_packet_v5_fuzz.cpp @@ -0,0 +1,61 @@ +#include "../../abstract_subnet_counters.hpp" +#include "../../fastnetmon_configuration_scheme.hpp" +#include "../../bgp_protocol_flow_spec.hpp" +#include "../../netflow_plugin/netflow_collector.hpp" +#include "../../sflow_plugin/sflow_collector.hpp" +#include "../../fastnetmon_logic.hpp" + + +log4cpp::Category& logger = log4cpp::Category::getRoot(); +time_t current_inaccurate_time = 0; +fastnetmon_configuration_t fastnetmon_global_configuration; +packet_buckets_storage_t packet_buckets_ipv6_storage; +bool DEBUG_DUMP_ALL_PACKETS = false; +bool DEBUG_DUMP_OTHER_PACKETS = false; +uint64_t total_ipv6_packets = 0; +uint64_t total_ipv4_packets = 0; +patricia_tree_t *lookup_tree_ipv4; +patricia_tree_t *lookup_tree_ipv6; + +uint64_t total_flowspec_whitelist_packets = 0; +uint64_t total_simple_packets_processed = 0; +uint64_t unknown_ip_version_packets = 0; +bool process_incoming_traffic = true; +bool process_outgoing_traffic = true; +bool enable_connection_tracking = true; + +std::vector static_flowspec_based_whitelist; +packet_buckets_storage_t packet_buckets_ipv4_storage; + +total_speed_counters_t total_counters_ipv4; +total_speed_counters_t total_counters_ipv6; + +abstract_subnet_counters_t ipv6_network_counters; +abstract_subnet_counters_t ipv6_host_counters; +abstract_subnet_counters_t ipv4_network_counters; +abstract_subnet_counters_t ipv4_host_counters; + +map_of_vector_counters_for_flow_t SubnetVectorMapFlow; +std::mutex flow_counter_mutex; + +extern process_packet_pointer netflow_process_func_ptr = process_packet; + +__AFL_FUZZ_INIT(); +int main(int argc, char** argv) { + uint32_t client_ipv4_address = 128; + std::string ip_add = "192.168.0.1"; + uint16_t version = 5; + + unsigned char* udp_buffer = __AFL_FUZZ_TESTCASE_BUF; + udp_buffer[0] = (version >> 8) & 0xFF; // Higher byte + udp_buffer[1] = version & 0xFF; + + while (__AFL_LOOP(10000)) { + unsigned int received_bytes = __AFL_FUZZ_TESTCASE_LEN; + process_netflow_packet(udp_buffer, received_bytes, ip_add, client_ipv4_address); + } + return 0; +} + + +//afl-fuzz with export ASAN_OPTIONS=detect_odr_violation=0:abort_on_error=1:symbolize=0 \ No newline at end of file diff --git a/src/tests/fuzz/process_netflow_packet_v5_fuzz_libfuzzer.cpp b/src/tests/fuzz/process_netflow_packet_v5_fuzz_libfuzzer.cpp new file mode 100644 index 00000000..c187a3c9 --- /dev/null +++ b/src/tests/fuzz/process_netflow_packet_v5_fuzz_libfuzzer.cpp @@ -0,0 +1,54 @@ +#include "../../abstract_subnet_counters.hpp" +#include "../../fastnetmon_configuration_scheme.hpp" +#include "../../bgp_protocol_flow_spec.hpp" +#include "../../netflow_plugin/netflow_collector.hpp" +#include "../../sflow_plugin/sflow_collector.hpp" +#include "../../fastnetmon_logic.hpp" + + +log4cpp::Category& logger = log4cpp::Category::getRoot(); +time_t current_inaccurate_time = 0; +fastnetmon_configuration_t fastnetmon_global_configuration; +packet_buckets_storage_t packet_buckets_ipv6_storage; +bool DEBUG_DUMP_ALL_PACKETS = false; +bool DEBUG_DUMP_OTHER_PACKETS = false; +uint64_t total_ipv6_packets = 0; +uint64_t total_ipv4_packets = 0; +patricia_tree_t *lookup_tree_ipv4; +patricia_tree_t *lookup_tree_ipv6; + +uint64_t total_flowspec_whitelist_packets = 0; +uint64_t total_simple_packets_processed = 0; +uint64_t unknown_ip_version_packets = 0; +bool process_incoming_traffic = true; +bool process_outgoing_traffic = true; +bool enable_connection_tracking = true; + +std::vector static_flowspec_based_whitelist; +packet_buckets_storage_t packet_buckets_ipv4_storage; + +total_speed_counters_t total_counters_ipv4; +total_speed_counters_t total_counters_ipv6; + +abstract_subnet_counters_t ipv6_network_counters; +abstract_subnet_counters_t ipv6_host_counters; +abstract_subnet_counters_t ipv4_network_counters; +abstract_subnet_counters_t ipv4_host_counters; + +map_of_vector_counters_for_flow_t SubnetVectorMapFlow; +std::mutex flow_counter_mutex; + + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + if (Size < 2) { + return 0; // Minimum data size required for processing + } + + uint32_t client_ipv4_address = 128; + std::string ip_add = "192.168.0.1"; + uint16_t version = (Data[0] << 8) | Data[1]; // Version from the first two bytes (informational only) + + // Call the NetFlow packet processing function, matching the AFL++ wrapper for this target + process_netflow_packet((unsigned char *)Data, Size, ip_add, client_ipv4_address); + + return 0; // Processing finished successfully +} \ No newline at
end of file diff --git a/src/tests/fuzz/scripts/minimize_out.sh b/src/tests/fuzz/scripts/minimize_out.sh new file mode 100755 index 00000000..c4271471 --- /dev/null +++ b/src/tests/fuzz/scripts/minimize_out.sh @@ -0,0 +1,50 @@ +#!/bin/bash +export ASAN_OPTIONS=detect_odr_violation=0:abort_on_error=1:symbolize=0 +# Check if two arguments are provided +if [ $# -ne 2 ]; then + echo "Usage: $0 <out_dir> <binary>" + exit 1 +fi + +# Get arguments +out_dir="$1" +binary="$2" +index=1 + +# Check if the out directory exists +if [ ! -d "$out_dir" ]; then + echo "Error: Directory $out_dir does not exist!" + exit 1 +fi + +# Check if the binary exists and is executable +if [ ! -f "$binary" ] || [ ! -x "$binary" ]; then + echo "Error: Binary file $binary does not exist or is not executable!" + exit 1 +fi + +# Create the new_out directory if it doesn't exist +if [ ! -d "new_out" ]; then + mkdir new_out + echo "Directory new_out has been created." +fi + +# Check if there are files in the out directory +if [ -z "$(ls -A "$out_dir")" ]; then + echo "No files found in $out_dir!" + exit 1 +fi + +# Iterate over files in the out directory +for file in "$out_dir"/*/crashes/*; do + # Check if the current item is a file + if [ -f "$file" ]; then + echo "Processing file: $file" + casr-san -o "new_out/$index.casrep" -- "$binary" < "$file" + ((index++)) + else + echo "Skipped (not a file): $file" + fi +done + +casr-cluster -c "new_out" out-cluster \ No newline at end of file diff --git a/src/tests/fuzz/scripts/start_fuzz_conf_file.sh b/src/tests/fuzz/scripts/start_fuzz_conf_file.sh new file mode 100755 index 00000000..b3709f62 --- /dev/null +++ b/src/tests/fuzz/scripts/start_fuzz_conf_file.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +SESSION_NAME="config_file_fuzz" + +INPUT_DIR="./input" +OUTPUT_DIR="./output" +TARGET_PROGRAM="/src/build_fuzz/fastnetmon" + +if [ ! -d "$INPUT_DIR" ]; then + echo "Input directory '$INPUT_DIR' does not exist. Creating it..." + mkdir -p "$INPUT_DIR" +fi + +if [ ! -d "$OUTPUT_DIR" ]; then + echo "Output directory '$OUTPUT_DIR' does not exist. Creating it..." + mkdir -p "$OUTPUT_DIR" +fi + +if [ ! -f "$TARGET_PROGRAM" ]; then + echo "Target program '$TARGET_PROGRAM' not found." + exit 1 +fi + + +echo "1" >> "$INPUT_DIR"/1 +echo "a" >> "$INPUT_DIR"/2 + +tmux new-session -d -s $SESSION_NAME -n afl1 +tmux send-keys -t ${SESSION_NAME}:afl1 "afl-fuzz -i $INPUT_DIR -o $OUTPUT_DIR -m none -M master -- $TARGET_PROGRAM --configuration_check --configuration_file @@" C-m +tmux new-window -t $SESSION_NAME -n afl2 +tmux send-keys -t ${SESSION_NAME}:afl2 "afl-fuzz -i $INPUT_DIR -o $OUTPUT_DIR -m none -S fuzzer02 -- $TARGET_PROGRAM --configuration_check --configuration_file @@" C-m +tmux select-window -t ${SESSION_NAME}:afl1 +tmux attach-session -t $SESSION_NAME diff --git a/src/tests/fuzz/scripts/start_fuzz_harness.sh b/src/tests/fuzz/scripts/start_fuzz_harness.sh new file mode 100755 index 00000000..37406007 --- /dev/null +++ b/src/tests/fuzz/scripts/start_fuzz_harness.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +# Check if one argument is provided +if [ $# -ne 1 ]; then + echo "Usage: $0 <./bin>" + exit 1 +fi +TARGET_PROGRAM="$1" + +ASAN_OPTIONS="detect_odr_violation=0:abort_on_error=1:symbolize=0" +TIME_STOP=3600 + +SESSION_NAME="process_netflow_packet_v5_fuzz" +INPUT_DIR="./input" +OUTPUT_DIR="./output" +DIR_NAME=$(basename $1)_dir +DICT="/AFLplusplus/dictionaries/pcap.dict" +MINIMIZE_SCRIPT=/src/tests/fuzz/scripts/minimize_out.sh + +if [ ! -d "$DIR_NAME" ]; then + echo "Work directory '$DIR_NAME' does not exist. Creating it..."
+ mkdir -p "$DIR_NAME" +fi + +cd $DIR_NAME +TARGET_PROGRAM=../"$1" + +if [ ! -d "$INPUT_DIR" ]; then + echo "Input directory '$INPUT_DIR' does not exist. Creating it..." + mkdir -p "$INPUT_DIR" +fi + +if [ ! -d "$OUTPUT_DIR" ]; then + echo "Output directory '$OUTPUT_DIR' does not exist. Creating it..." + mkdir -p "$OUTPUT_DIR" +fi + +if [ ! -f "$TARGET_PROGRAM" ]; then + echo "Target program '$TARGET_PROGRAM' not found." + exit 1 +fi + +if [ ! -x "$MINIMIZE_SCRIPT" ]; then + echo "Minimization script not found or not executable." + exit 1 +fi + +wget https://raw.githubusercontent.com/catalyst/openstack-sflow-traffic-billing/refs/heads/master/examples/sample-sflow-packet -O input/1 + +tmux new-session -d -s $SESSION_NAME -n master +tmux send-keys -t ${SESSION_NAME}:master "ASAN_OPTIONS=$ASAN_OPTIONS AFL_EXIT_ON_TIME=$TIME_STOP afl-fuzz -i $INPUT_DIR -o $OUTPUT_DIR -x $DICT -m none -M master -- ./$TARGET_PROGRAM " C-m +tmux new-window -t $SESSION_NAME -n slave +tmux send-keys -t ${SESSION_NAME}:slave "ASAN_OPTIONS=$ASAN_OPTIONS AFL_EXIT_ON_TIME=$TIME_STOP afl-fuzz -i $INPUT_DIR -o $OUTPUT_DIR -x $DICT -m none -S fuzzer02 -- ./$TARGET_PROGRAM " C-m +tmux select-window -t ${SESSION_NAME}:slave +tmux attach-session -t $SESSION_NAME + + +# Wait for all afl-fuzz processes to finish +while pgrep -x "afl-fuzz" > /dev/null; do + echo "Waiting for afl-fuzz processes to finish..." + sleep 10 +done + +echo "All afl-fuzz processes have completed. Stopping tmux sessions." +tmux kill-session -t $SESSION_NAME + +echo "Starting minimization." +$MINIMIZE_SCRIPT $OUTPUT_DIR $TARGET_PROGRAM + diff --git a/tests/Dockerfile.ubuntu-24.04-afl++ b/tests/Dockerfile.ubuntu-24.04-afl++ new file mode 100644 index 00000000..514e64f4 --- /dev/null +++ b/tests/Dockerfile.ubuntu-24.04-afl++ @@ -0,0 +1,71 @@ +FROM ubuntu:24.04 + +MAINTAINER Evgeny Shtanov + +# non-interactive +ENV DEBIAN_FRONTEND noninteractive + +# install build dependencies +RUN apt-get update +RUN apt-get install -y --no-install-recommends build-essential git ca-certificates cmake libssl-dev\ + capnproto libcapnp-dev libelf-dev libbpf-dev libpcap-dev libgrpc-dev libgrpc++-dev libprotobuf-dev\ + protobuf-compiler libprotoc-dev libprotobuf-dev protobuf-compiler-grpc libboost-dev\ + libboost-serialization-dev libboost-thread-dev libboost-regex-dev libboost-program-options-dev\ + libmongoc-dev liblog4cpp5-dev libncurses5-dev + +# absl +RUN apt-get install -y clang llvm llvm-dev lld tmux curl wget gdb vim netcat-traditional + +RUN git clone https://github.com/AFLplusplus/AFLplusplus && cd AFLplusplus && make && make install + +# Install cargo +RUN curl https://sh.rustup.rs -sSf | sh -s -- -y +RUN cp /root/.cargo/bin/* /bin/ + +# Install casr +RUN mkdir casr && git clone https://github.com/ispras/casr/ +RUN cd casr/ && cargo build --release && cargo install casr +RUN cp /root/.cargo/bin/* /bin/ + +#Install preeny for desock.so +RUN git clone https://github.com/zardus/preeny/ && \ + cd preeny && mkdir build && cd build && \ + cmake ../ && make desock && cp lib/libdesock.so /usr/lib + + +COPY src/ /src/ +WORKDIR /src + +# Prepare context +RUN cp notify_about_attack.sh /usr/local/bin/notify_about_attack.sh && \ + cp tests/fuzz/fastnetmon.conf /etc/ && \ + mkdir -p /var/log/fastnetmon_attacks + +# build dir for AFL++ fuzzing +RUN mkdir build_fuzz && cd build_fuzz && \ + cmake .. 
-DLINK_WITH_ABSL=ON -DENABLE_FUZZ_TEST=ON -DCMAKE_CXX_COMPILER=afl-clang-lto++ \ + -DCMAKE_CXX_FLAGS="-g -O0 -ggdb3 -fsanitize=address,bounds,undefined,null,float-divide-by-zero" && \ + make -j$(nproc) + +# build dir for libfuzzer fuzzing +RUN mkdir build_libfuzz && cd build_libfuzz && \ + cmake .. -DLINK_WITH_ABSL=ON -DENABLE_FUZZ_TEST_LIBFUZZER=ON -DCMAKE_CXX_COMPILER=clang++ \ + -DCMAKE_CXX_FLAGS="-g -O0 -ggdb3 -fsanitize=address,bounds,undefined,null,float-divide-by-zero" && \ + make -j$(nproc) + +# build dir for desock +RUN mkdir build_desock && cd build_desock && \ + cmake .. -DLINK_WITH_ABSL=ON -DENABLE_FUZZ_TEST_DESOCK=ON -DCMAKE_CXX_COMPILER=afl-clang-lto++ \ + -DCMAKE_CXX_FLAGS="-g -O0 -ggdb3 -fsanitize=address,bounds,undefined,null,float-divide-by-zero" && \ + make -j$(nproc) + +# dir for verifying crashes and debug harnesses +RUN mkdir build_debug && cd build_debug && \ + cmake .. -DCMAKE_BUILD_TYPE=Debug -DLINK_WITH_ABSL=ON -DENABLE_FUZZ_TEST=ON -DCMAKE_CXX_COMPILER=afl-clang-lto++ \ + -DCMAKE_CXX_FLAGS="-g -O0 -ggdb3" && \ + make -j$(nproc) + +# dir for verifying crashes on vanilla binary +RUN mkdir build_clean && cd build_clean && \ + cmake .. -DLINK_WITH_ABSL=ON && \ + make -j$(nproc) \ No newline at end of file
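+
+# Note (assumption, not part of the original build steps): the wrappers end up directly in their
+# build directories, e.g. /src/build_fuzz/process_netflow_packet_v5_fuzz for AFL++ or
+# /src/build_libfuzz/parse_sflow_v5_packet_fuzz for libFuzzer. The libFuzzer binaries can be run
+# standalone, e.g. `/src/build_libfuzz/parse_sflow_v5_packet_fuzz -max_total_time=60 ./corpus`,
+# while the AFL++ wrappers are driven via afl-fuzz or the helper scripts in src/tests/fuzz/scripts/.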