From 42290e691e3aa194821f5b926d3d4a3093b3065e Mon Sep 17 00:00:00 2001 From: Yuekai Jia Date: Thu, 22 Aug 2024 07:56:49 +0800 Subject: [PATCH] Initial commit --- .clang-format | 16 + .github/workflows/build.yml | 30 + .gitignore | 17 + .gitmodules | 8 + CMakeLists.txt | 93 ++ Makefile | 60 + README.md | 29 + apps/CMakeLists.txt | 55 + apps/bench.c | 79 ++ apps/bench_app_switch.c | 78 ++ apps/bench_common.h | 13 + apps/bench_pthread.c | 43 + apps/dpdk_netperf/.gitignore | 1 + apps/dpdk_netperf/Makefile | 44 + apps/dpdk_netperf/README.md | 52 + apps/dpdk_netperf/dpdk_netperf.c | 757 +++++++++++ apps/fakework.c | 85 ++ apps/hello.c | 42 + apps/hello_shim.c | 32 + apps/memcached | 1 + apps/schbench | 1 + apps/test_net.c | 50 + apps/test_rcu.c | 131 ++ apps/test_timer.c | 61 + docs/imgs/overview.drawio | 796 ++++++++++++ docs/imgs/overview.jpg | Bin 0 -> 114085 bytes docs/imgs/uintr.drawio | 87 ++ docs/imgs/uintr.svg | 4 + docs/sosp24-ae.md | 416 ++++++ include/net/arp.h | 46 + include/net/cksum.h | 138 ++ include/net/ethernet.h | 339 +++++ include/net/icmp.h | 203 +++ include/net/ip.h | 74 ++ include/net/mbuf.h | 355 ++++++ include/net/ping.h | 6 + include/net/tcp.h | 46 + include/net/udp.h | 14 + include/skyloft/global.h | 33 + include/skyloft/io.h | 137 ++ include/skyloft/mm.h | 11 + include/skyloft/mm/mempool.h | 59 + include/skyloft/mm/page.h | 227 ++++ include/skyloft/mm/slab.h | 80 ++ include/skyloft/mm/smalloc.h | 47 + include/skyloft/mm/stack.h | 27 + include/skyloft/mm/tcache.h | 103 ++ include/skyloft/net.h | 189 +++ include/skyloft/percpu.h | 78 ++ include/skyloft/platform.h | 122 ++ include/skyloft/sched.h | 192 +++ include/skyloft/sched/ops.h | 69 + include/skyloft/sched/policy/cfs.h | 87 ++ include/skyloft/sched/policy/dummy.h | 21 + include/skyloft/sched/policy/fifo.h | 66 + include/skyloft/sched/policy/rr.h | 53 + include/skyloft/sched/policy/sq.h | 104 ++ include/skyloft/sched/policy/sq_lcbe.h | 125 ++ include/skyloft/stat.h | 86 ++ include/skyloft/sync.h | 7 + include/skyloft/sync/rcu.h | 117 ++ include/skyloft/sync/rculist.h | 73 ++ include/skyloft/sync/signal.h | 7 + include/skyloft/sync/sync.h | 143 +++ include/skyloft/sync/timer.h | 62 + include/skyloft/task.h | 79 ++ include/skyloft/tcp.h | 30 + include/skyloft/uapi/dev.h | 18 + include/skyloft/uapi/pthread.h | 41 + include/skyloft/uapi/task.h | 71 ++ include/skyloft/udp.h | 74 ++ kmod/Makefile | 35 + kmod/main.c | 319 +++++ libos/CMakeLists.txt | 69 + libos/exit.c | 50 + libos/io/cmd.c | 61 + libos/io/completion.c | 94 ++ libos/io/dpdk.c | 224 ++++ libos/io/main.c | 177 +++ libos/io/rx.c | 158 +++ libos/io/tx.c | 304 +++++ libos/libos.c | 270 ++++ libos/libos.ld | 11 + libos/mm/mempool.c | 160 +++ libos/mm/page.c | 396 ++++++ libos/mm/slab.c | 508 ++++++++ libos/mm/smalloc.c | 127 ++ libos/mm/stack.c | 113 ++ libos/mm/tcache.c | 240 ++++ libos/net/arp.c | 396 ++++++ libos/net/core.c | 526 ++++++++ libos/net/dump.c | 101 ++ libos/net/icmp.c | 96 ++ libos/net/mbuf.c | 29 + libos/net/ping.c | 68 + libos/net/tcp.c | 1115 +++++++++++++++++ libos/net/tcp.h | 184 +++ libos/net/tcp_in.c | 509 ++++++++ libos/net/tcp_out.c | 389 ++++++ libos/net/transport.c | 253 ++++ libos/net/udp.c | 686 ++++++++++ libos/net/waitq.h | 117 ++ libos/percpu.c | 56 + libos/platform/cpu.c | 105 ++ libos/platform/dev.c | 97 ++ libos/platform/mem.c | 313 +++++ libos/platform/mod.c | 33 + libos/platform/uintr.c | 52 + libos/sched/policy/cfs.c | 480 +++++++ libos/sched/policy/fifo.c | 132 ++ libos/sched/policy/rr.c | 101 ++ libos/sched/policy/sq.c | 165 
+++ libos/sched/policy/sq_lcbe.c | 455 +++++++ libos/sched/sched.c | 438 +++++++ libos/sched/softirq.c | 151 +++ libos/sched/switch.S | 73 ++ libos/sched/task.c | 244 ++++ libos/shim/entry.c | 30 + libos/shim/hook.h | 64 + libos/shim/pthread.c | 165 +++ libos/stat.c | 20 + libos/sync/rcu.c | 150 +++ libos/sync/signal.c | 25 + libos/sync/sync.c | 329 +++++ libos/sync/timer.c | 340 +++++ microbench/.gitignore | 7 + microbench/Makefile | 15 + microbench/common.h | 9 + microbench/kipi_send_recv.c | 122 ++ microbench/setitimer_recv.c | 69 + microbench/signal_delivery.c | 85 ++ microbench/signal_send_recv.c | 95 ++ microbench/uipi_delivery.c | 123 ++ microbench/uipi_send_recv.c | 139 ++ microbench/utimer_recv.c | 100 ++ paper_results/memcached/USR/shenango | 30 + paper_results/memcached/USR/skyloft | 30 + paper_results/schbench/linux_cfs/112.txt | 76 ++ paper_results/schbench/linux_cfs/128.txt | 76 ++ paper_results/schbench/linux_cfs/144.txt | 76 ++ paper_results/schbench/linux_cfs/16.txt | 76 ++ paper_results/schbench/linux_cfs/160.txt | 76 ++ paper_results/schbench/linux_cfs/176.txt | 76 ++ paper_results/schbench/linux_cfs/192.txt | 76 ++ paper_results/schbench/linux_cfs/208.txt | 76 ++ paper_results/schbench/linux_cfs/224.txt | 76 ++ paper_results/schbench/linux_cfs/24.txt | 76 ++ paper_results/schbench/linux_cfs/240.txt | 76 ++ paper_results/schbench/linux_cfs/256.txt | 76 ++ paper_results/schbench/linux_cfs/32.txt | 76 ++ paper_results/schbench/linux_cfs/4.txt | 76 ++ paper_results/schbench/linux_cfs/40.txt | 76 ++ paper_results/schbench/linux_cfs/48.txt | 76 ++ paper_results/schbench/linux_cfs/56.txt | 72 ++ paper_results/schbench/linux_cfs/64.txt | 76 ++ paper_results/schbench/linux_cfs/72.txt | 76 ++ paper_results/schbench/linux_cfs/8.txt | 76 ++ paper_results/schbench/linux_cfs/80.txt | 76 ++ paper_results/schbench/linux_cfs/88.txt | 72 ++ paper_results/schbench/linux_cfs/96.txt | 76 ++ paper_results/schbench/linux_cfs/all.csv | 24 + paper_results/schbench/linux_fifo/112.txt | 76 ++ paper_results/schbench/linux_fifo/128.txt | 76 ++ paper_results/schbench/linux_fifo/144.txt | 76 ++ paper_results/schbench/linux_fifo/16.txt | 76 ++ paper_results/schbench/linux_fifo/160.txt | 76 ++ paper_results/schbench/linux_fifo/176.txt | 76 ++ paper_results/schbench/linux_fifo/192.txt | 76 ++ paper_results/schbench/linux_fifo/208.txt | 76 ++ paper_results/schbench/linux_fifo/224.txt | 76 ++ paper_results/schbench/linux_fifo/24.txt | 76 ++ paper_results/schbench/linux_fifo/240.txt | 76 ++ paper_results/schbench/linux_fifo/256.txt | 76 ++ paper_results/schbench/linux_fifo/32.txt | 76 ++ paper_results/schbench/linux_fifo/4.txt | 76 ++ paper_results/schbench/linux_fifo/40.txt | 76 ++ paper_results/schbench/linux_fifo/48.txt | 76 ++ paper_results/schbench/linux_fifo/64.txt | 76 ++ paper_results/schbench/linux_fifo/72.txt | 76 ++ paper_results/schbench/linux_fifo/8.txt | 76 ++ paper_results/schbench/linux_fifo/80.txt | 76 ++ paper_results/schbench/linux_fifo/96.txt | 76 ++ paper_results/schbench/linux_fifo/all.csv | 22 + paper_results/schbench/linux_rr/112.txt | 76 ++ paper_results/schbench/linux_rr/128.txt | 76 ++ paper_results/schbench/linux_rr/144.txt | 76 ++ paper_results/schbench/linux_rr/16.txt | 76 ++ paper_results/schbench/linux_rr/160.txt | 76 ++ paper_results/schbench/linux_rr/176.txt | 76 ++ paper_results/schbench/linux_rr/192.txt | 76 ++ paper_results/schbench/linux_rr/208.txt | 76 ++ paper_results/schbench/linux_rr/224.txt | 76 ++ paper_results/schbench/linux_rr/24.txt | 76 ++ 
paper_results/schbench/linux_rr/240.txt | 76 ++ paper_results/schbench/linux_rr/256.txt | 76 ++ paper_results/schbench/linux_rr/32.txt | 76 ++ paper_results/schbench/linux_rr/4.txt | 76 ++ paper_results/schbench/linux_rr/40.txt | 76 ++ paper_results/schbench/linux_rr/48.txt | 76 ++ paper_results/schbench/linux_rr/56.txt | 36 + paper_results/schbench/linux_rr/64.txt | 76 ++ paper_results/schbench/linux_rr/72.txt | 76 ++ paper_results/schbench/linux_rr/8.txt | 76 ++ paper_results/schbench/linux_rr/80.txt | 76 ++ paper_results/schbench/linux_rr/88.txt | 36 + paper_results/schbench/linux_rr/96.txt | 76 ++ paper_results/schbench/linux_rr/all.csv | 24 + paper_results/schbench/skyloft_cfs50us/16.txt | 18 + paper_results/schbench/skyloft_cfs50us/24.txt | 18 + paper_results/schbench/skyloft_cfs50us/32.txt | 18 + paper_results/schbench/skyloft_cfs50us/4.txt | 18 + paper_results/schbench/skyloft_cfs50us/40.txt | 18 + paper_results/schbench/skyloft_cfs50us/48.txt | 18 + paper_results/schbench/skyloft_cfs50us/56.txt | 18 + paper_results/schbench/skyloft_cfs50us/64.txt | 18 + paper_results/schbench/skyloft_cfs50us/72.txt | 18 + paper_results/schbench/skyloft_cfs50us/8.txt | 18 + paper_results/schbench/skyloft_cfs50us/80.txt | 18 + paper_results/schbench/skyloft_cfs50us/88.txt | 18 + paper_results/schbench/skyloft_cfs50us/96.txt | 18 + .../schbench/skyloft_cfs50us/all.csv | 14 + paper_results/schbench/skyloft_fifo/112.txt | 73 ++ paper_results/schbench/skyloft_fifo/128.txt | 73 ++ paper_results/schbench/skyloft_fifo/144.txt | 73 ++ paper_results/schbench/skyloft_fifo/16.txt | 73 ++ paper_results/schbench/skyloft_fifo/160.txt | 73 ++ paper_results/schbench/skyloft_fifo/176.txt | 73 ++ paper_results/schbench/skyloft_fifo/192.txt | 36 + paper_results/schbench/skyloft_fifo/208.txt | 73 ++ paper_results/schbench/skyloft_fifo/224.txt | 73 ++ paper_results/schbench/skyloft_fifo/24.txt | 73 ++ paper_results/schbench/skyloft_fifo/240.txt | 73 ++ paper_results/schbench/skyloft_fifo/256.txt | 73 ++ paper_results/schbench/skyloft_fifo/32.txt | 73 ++ paper_results/schbench/skyloft_fifo/4.txt | 73 ++ paper_results/schbench/skyloft_fifo/40.txt | 73 ++ paper_results/schbench/skyloft_fifo/48.txt | 36 + paper_results/schbench/skyloft_fifo/64.txt | 73 ++ paper_results/schbench/skyloft_fifo/72.txt | 36 + paper_results/schbench/skyloft_fifo/8.txt | 73 ++ paper_results/schbench/skyloft_fifo/80.txt | 73 ++ paper_results/schbench/skyloft_fifo/96.txt | 73 ++ paper_results/schbench/skyloft_fifo/all.csv | 22 + paper_results/schbench/skyloft_rr1ms/112.txt | Bin 0 -> 5520 bytes paper_results/schbench/skyloft_rr1ms/128.txt | 37 + paper_results/schbench/skyloft_rr1ms/144.txt | Bin 0 -> 5524 bytes paper_results/schbench/skyloft_rr1ms/16.txt | Bin 0 -> 6146 bytes paper_results/schbench/skyloft_rr1ms/160.txt | Bin 0 -> 5524 bytes paper_results/schbench/skyloft_rr1ms/176.txt | Bin 0 -> 5520 bytes paper_results/schbench/skyloft_rr1ms/192.txt | Bin 0 -> 5521 bytes paper_results/schbench/skyloft_rr1ms/208.txt | Bin 0 -> 5524 bytes paper_results/schbench/skyloft_rr1ms/224.txt | Bin 0 -> 5523 bytes paper_results/schbench/skyloft_rr1ms/24.txt | Bin 0 -> 5501 bytes paper_results/schbench/skyloft_rr1ms/240.txt | 0 paper_results/schbench/skyloft_rr1ms/32.txt | Bin 0 -> 5512 bytes paper_results/schbench/skyloft_rr1ms/4.txt | Bin 0 -> 6127 bytes paper_results/schbench/skyloft_rr1ms/40.txt | Bin 0 -> 5515 bytes paper_results/schbench/skyloft_rr1ms/48.txt | 37 + paper_results/schbench/skyloft_rr1ms/64.txt | Bin 0 -> 5524 bytes 
paper_results/schbench/skyloft_rr1ms/72.txt | Bin 0 -> 5522 bytes paper_results/schbench/skyloft_rr1ms/8.txt | 55 + paper_results/schbench/skyloft_rr1ms/80.txt | 37 + paper_results/schbench/skyloft_rr1ms/96.txt | 37 + paper_results/schbench/skyloft_rr1ms/all.csv | 20 + .../schbench/skyloft_rr200us/112.txt | Bin 0 -> 5513 bytes .../schbench/skyloft_rr200us/128.txt | 37 + .../schbench/skyloft_rr200us/144.txt | Bin 0 -> 5515 bytes paper_results/schbench/skyloft_rr200us/16.txt | Bin 0 -> 6141 bytes .../schbench/skyloft_rr200us/160.txt | 37 + .../schbench/skyloft_rr200us/176.txt | Bin 0 -> 5515 bytes .../schbench/skyloft_rr200us/192.txt | Bin 0 -> 5518 bytes .../schbench/skyloft_rr200us/208.txt | Bin 0 -> 5516 bytes .../schbench/skyloft_rr200us/224.txt | Bin 0 -> 5515 bytes paper_results/schbench/skyloft_rr200us/24.txt | 37 + .../schbench/skyloft_rr200us/240.txt | Bin 0 -> 5517 bytes .../schbench/skyloft_rr200us/256.txt | Bin 0 -> 5516 bytes paper_results/schbench/skyloft_rr200us/32.txt | Bin 0 -> 5510 bytes paper_results/schbench/skyloft_rr200us/4.txt | 55 + paper_results/schbench/skyloft_rr200us/40.txt | Bin 0 -> 5512 bytes paper_results/schbench/skyloft_rr200us/48.txt | Bin 0 -> 5512 bytes paper_results/schbench/skyloft_rr200us/64.txt | Bin 0 -> 5511 bytes paper_results/schbench/skyloft_rr200us/72.txt | Bin 0 -> 5513 bytes paper_results/schbench/skyloft_rr200us/8.txt | Bin 0 -> 6145 bytes paper_results/schbench/skyloft_rr200us/80.txt | Bin 0 -> 5512 bytes paper_results/schbench/skyloft_rr200us/96.txt | Bin 0 -> 5516 bytes .../schbench/skyloft_rr200us/all.csv | 22 + paper_results/schbench/skyloft_rr50us/112.txt | 17 + paper_results/schbench/skyloft_rr50us/128.txt | Bin 0 -> 5520 bytes paper_results/schbench/skyloft_rr50us/144.txt | Bin 0 -> 5518 bytes paper_results/schbench/skyloft_rr50us/16.txt | Bin 0 -> 5475 bytes paper_results/schbench/skyloft_rr50us/160.txt | 17 + paper_results/schbench/skyloft_rr50us/176.txt | Bin 0 -> 5518 bytes paper_results/schbench/skyloft_rr50us/192.txt | 37 + paper_results/schbench/skyloft_rr50us/208.txt | 19 + paper_results/schbench/skyloft_rr50us/224.txt | 18 + paper_results/schbench/skyloft_rr50us/24.txt | 37 + paper_results/schbench/skyloft_rr50us/240.txt | Bin 0 -> 5519 bytes paper_results/schbench/skyloft_rr50us/256.txt | 37 + paper_results/schbench/skyloft_rr50us/32.txt | Bin 0 -> 5500 bytes paper_results/schbench/skyloft_rr50us/4.txt | Bin 0 -> 5461 bytes paper_results/schbench/skyloft_rr50us/40.txt | 36 + paper_results/schbench/skyloft_rr50us/48.txt | 36 + paper_results/schbench/skyloft_rr50us/56.txt | 54 + paper_results/schbench/skyloft_rr50us/64.txt | 37 + paper_results/schbench/skyloft_rr50us/72.txt | 18 + paper_results/schbench/skyloft_rr50us/8.txt | Bin 0 -> 5473 bytes paper_results/schbench/skyloft_rr50us/80.txt | 16 + paper_results/schbench/skyloft_rr50us/88.txt | 54 + paper_results/schbench/skyloft_rr50us/96.txt | 17 + paper_results/schbench/skyloft_rr50us/all.csv | 24 + .../synthetic/99.5-4-0.5-10000-lcbe/cfs-be | 9 + .../synthetic/99.5-4-0.5-10000-lcbe/cfs-lc | 8 + .../99.5-4-0.5-10000-lcbe/ghost-20us-be | 42 + .../99.5-4-0.5-10000-lcbe/ghost-20us-lc | 42 + .../99.5-4-0.5-10000-lcbe/ghost-30us-be | 10 + .../99.5-4-0.5-10000-lcbe/ghost-30us-lc | 11 + .../99.5-4-0.5-10000-lcbe/shinjuku-30us-be | 12 + .../99.5-4-0.5-10000-lcbe/shinjuku-30us-lc | 13 + .../99.5-4-0.5-10000-lcbe/skyloft-20us-be | 14 + .../99.5-4-0.5-10000-lcbe/skyloft-20us-lc | 14 + .../99.5-4-0.5-10000-lcbe/skyloft-30us-be | 10 + .../99.5-4-0.5-10000-lcbe/skyloft-30us-lc | 15 + 
paper_results/synthetic/99.5-4-0.5-10000/cfs | 8 + .../synthetic/99.5-4-0.5-10000/ghost-100us | 46 + .../synthetic/99.5-4-0.5-10000/ghost-10us | 17 + .../synthetic/99.5-4-0.5-10000/ghost-20us | 42 + .../synthetic/99.5-4-0.5-10000/ghost-30us | 9 + .../synthetic/99.5-4-0.5-10000/shinjuku-10us | 36 + .../synthetic/99.5-4-0.5-10000/shinjuku-30us | 13 + .../synthetic/99.5-4-0.5-10000/shinjuku-5us | 36 + .../synthetic/99.5-4-0.5-10000/skyloft-10us | 10 + .../synthetic/99.5-4-0.5-10000/skyloft-20us | 17 + .../synthetic/99.5-4-0.5-10000/skyloft-30us | 17 + .../synthetic/99.5-4-0.5-10000/skyloft-inf | 13 + params.h.in | 66 + requirements.txt | 1 + scripts/.gitignore | 1 + scripts/bench/schbench.sh | 29 + scripts/build.sh | 48 + scripts/cmake/rocksdb.mk | 32 + scripts/disable_cpufreq_scaling.sh | 33 + scripts/install_deps.sh | 8 + scripts/make_rootfs.sh | 43 + scripts/params/default.params | 66 + scripts/params/memcached.params | 4 + scripts/params/rocksdb-server-10us.params | 4 + scripts/params/rocksdb-server-20us.params | 4 + scripts/params/rocksdb-server-5us.params | 4 + scripts/params/rocksdb-server.params | 3 + scripts/params/schbench-cfs-50us.params | 3 + scripts/params/schbench-rr-1ms.params | 4 + scripts/params/schbench-rr-200us.params | 4 + scripts/params/schbench-rr-50us.params | 4 + scripts/params/schbench-rr.params | 2 + scripts/params/shinjuku.params | 2 + scripts/params/thread.params | 2 + scripts/plots/common.py | 11 + scripts/plots/plot_memcached.py | 115 ++ scripts/plots/plot_schbench.py | 107 ++ scripts/plots/plot_schbench2.py | 120 ++ scripts/plots/plot_synthetic.py | 146 +++ scripts/plots/requirements.txt | 2 + scripts/run.sh | 15 + scripts/run_experiments.sh | 19 + scripts/run_shinjuku.sh | 26 + scripts/run_synthetic_lc.sh | 35 + scripts/run_synthetic_lcbe.sh | 51 + scripts/setup_host.sh | 15 + synthetic/CMakeLists.txt | 32 + synthetic/antagonist/main.cc | 114 ++ synthetic/rocksdb/common.cc | 411 ++++++ synthetic/rocksdb/common.h | 97 ++ synthetic/rocksdb/jbsq.h | 23 + synthetic/rocksdb/native.cc | 301 +++++ synthetic/rocksdb/random.cc | 47 + synthetic/rocksdb/random.h | 13 + synthetic/rocksdb/shinjuku.cc | 202 +++ synthetic/rocksdb/shinjuku_old.cc | 311 +++++ utils/CMakeLists.txt | 5 + utils/bitmap.c | 52 + utils/include/utils/assert.h | 130 ++ utils/include/utils/atomic.h | 18 + utils/include/utils/bitmap.h | 127 ++ utils/include/utils/byteorder.h | 93 ++ utils/include/utils/cksum.h | 75 ++ utils/include/utils/cpu.h | 13 + utils/include/utils/defs.h | 350 ++++++ utils/include/utils/gen.h | 62 + utils/include/utils/hash.h | 154 +++ utils/include/utils/init.h | 30 + utils/include/utils/kref.h | 72 ++ utils/include/utils/list.h | 736 +++++++++++ utils/include/utils/log.h | 101 ++ utils/include/utils/lrpc.h | 131 ++ utils/include/utils/msgq.h | 99 ++ utils/include/utils/ops.h | 18 + utils/include/utils/queue.h | 112 ++ utils/include/utils/rbtree.h | 678 ++++++++++ utils/include/utils/shm.h | 57 + utils/include/utils/spinlock.h | 80 ++ utils/include/utils/syscalls.h | 9 + utils/include/utils/time.h | 66 + utils/include/utils/types.h | 11 + utils/include/utils/uintr.h | 43 + utils/list.c | 40 + utils/log.c | 85 ++ utils/lrpc.c | 27 + utils/rbtree.c | 595 +++++++++ 408 files changed, 34331 insertions(+) create mode 100644 .clang-format create mode 100644 .github/workflows/build.yml create mode 100644 .gitignore create mode 100644 .gitmodules create mode 100644 CMakeLists.txt create mode 100644 Makefile create mode 100644 README.md create mode 100644 apps/CMakeLists.txt create mode 
100644 apps/bench.c create mode 100644 apps/bench_app_switch.c create mode 100644 apps/bench_common.h create mode 100644 apps/bench_pthread.c create mode 100644 apps/dpdk_netperf/.gitignore create mode 100644 apps/dpdk_netperf/Makefile create mode 100644 apps/dpdk_netperf/README.md create mode 100644 apps/dpdk_netperf/dpdk_netperf.c create mode 100644 apps/fakework.c create mode 100644 apps/hello.c create mode 100644 apps/hello_shim.c create mode 160000 apps/memcached create mode 160000 apps/schbench create mode 100644 apps/test_net.c create mode 100644 apps/test_rcu.c create mode 100644 apps/test_timer.c create mode 100644 docs/imgs/overview.drawio create mode 100644 docs/imgs/overview.jpg create mode 100644 docs/imgs/uintr.drawio create mode 100644 docs/imgs/uintr.svg create mode 100644 docs/sosp24-ae.md create mode 100644 include/net/arp.h create mode 100644 include/net/cksum.h create mode 100644 include/net/ethernet.h create mode 100644 include/net/icmp.h create mode 100644 include/net/ip.h create mode 100644 include/net/mbuf.h create mode 100644 include/net/ping.h create mode 100644 include/net/tcp.h create mode 100644 include/net/udp.h create mode 100644 include/skyloft/global.h create mode 100644 include/skyloft/io.h create mode 100644 include/skyloft/mm.h create mode 100644 include/skyloft/mm/mempool.h create mode 100644 include/skyloft/mm/page.h create mode 100644 include/skyloft/mm/slab.h create mode 100644 include/skyloft/mm/smalloc.h create mode 100644 include/skyloft/mm/stack.h create mode 100644 include/skyloft/mm/tcache.h create mode 100644 include/skyloft/net.h create mode 100644 include/skyloft/percpu.h create mode 100644 include/skyloft/platform.h create mode 100644 include/skyloft/sched.h create mode 100644 include/skyloft/sched/ops.h create mode 100644 include/skyloft/sched/policy/cfs.h create mode 100644 include/skyloft/sched/policy/dummy.h create mode 100644 include/skyloft/sched/policy/fifo.h create mode 100644 include/skyloft/sched/policy/rr.h create mode 100644 include/skyloft/sched/policy/sq.h create mode 100644 include/skyloft/sched/policy/sq_lcbe.h create mode 100644 include/skyloft/stat.h create mode 100644 include/skyloft/sync.h create mode 100644 include/skyloft/sync/rcu.h create mode 100644 include/skyloft/sync/rculist.h create mode 100644 include/skyloft/sync/signal.h create mode 100644 include/skyloft/sync/sync.h create mode 100644 include/skyloft/sync/timer.h create mode 100644 include/skyloft/task.h create mode 100644 include/skyloft/tcp.h create mode 100644 include/skyloft/uapi/dev.h create mode 100644 include/skyloft/uapi/pthread.h create mode 100644 include/skyloft/uapi/task.h create mode 100644 include/skyloft/udp.h create mode 100644 kmod/Makefile create mode 100644 kmod/main.c create mode 100644 libos/CMakeLists.txt create mode 100644 libos/exit.c create mode 100644 libos/io/cmd.c create mode 100644 libos/io/completion.c create mode 100644 libos/io/dpdk.c create mode 100644 libos/io/main.c create mode 100644 libos/io/rx.c create mode 100644 libos/io/tx.c create mode 100644 libos/libos.c create mode 100644 libos/libos.ld create mode 100644 libos/mm/mempool.c create mode 100644 libos/mm/page.c create mode 100644 libos/mm/slab.c create mode 100644 libos/mm/smalloc.c create mode 100644 libos/mm/stack.c create mode 100644 libos/mm/tcache.c create mode 100644 libos/net/arp.c create mode 100644 libos/net/core.c create mode 100644 libos/net/dump.c create mode 100644 libos/net/icmp.c create mode 100644 libos/net/mbuf.c create mode 100644 libos/net/ping.c 
create mode 100644 libos/net/tcp.c create mode 100644 libos/net/tcp.h create mode 100644 libos/net/tcp_in.c create mode 100644 libos/net/tcp_out.c create mode 100644 libos/net/transport.c create mode 100644 libos/net/udp.c create mode 100644 libos/net/waitq.h create mode 100644 libos/percpu.c create mode 100644 libos/platform/cpu.c create mode 100644 libos/platform/dev.c create mode 100644 libos/platform/mem.c create mode 100644 libos/platform/mod.c create mode 100644 libos/platform/uintr.c create mode 100644 libos/sched/policy/cfs.c create mode 100644 libos/sched/policy/fifo.c create mode 100644 libos/sched/policy/rr.c create mode 100644 libos/sched/policy/sq.c create mode 100644 libos/sched/policy/sq_lcbe.c create mode 100644 libos/sched/sched.c create mode 100644 libos/sched/softirq.c create mode 100644 libos/sched/switch.S create mode 100644 libos/sched/task.c create mode 100644 libos/shim/entry.c create mode 100644 libos/shim/hook.h create mode 100644 libos/shim/pthread.c create mode 100644 libos/stat.c create mode 100644 libos/sync/rcu.c create mode 100644 libos/sync/signal.c create mode 100644 libos/sync/sync.c create mode 100644 libos/sync/timer.c create mode 100644 microbench/.gitignore create mode 100644 microbench/Makefile create mode 100644 microbench/common.h create mode 100644 microbench/kipi_send_recv.c create mode 100644 microbench/setitimer_recv.c create mode 100644 microbench/signal_delivery.c create mode 100644 microbench/signal_send_recv.c create mode 100644 microbench/uipi_delivery.c create mode 100644 microbench/uipi_send_recv.c create mode 100644 microbench/utimer_recv.c create mode 100644 paper_results/memcached/USR/shenango create mode 100644 paper_results/memcached/USR/skyloft create mode 100644 paper_results/schbench/linux_cfs/112.txt create mode 100644 paper_results/schbench/linux_cfs/128.txt create mode 100644 paper_results/schbench/linux_cfs/144.txt create mode 100644 paper_results/schbench/linux_cfs/16.txt create mode 100644 paper_results/schbench/linux_cfs/160.txt create mode 100644 paper_results/schbench/linux_cfs/176.txt create mode 100644 paper_results/schbench/linux_cfs/192.txt create mode 100644 paper_results/schbench/linux_cfs/208.txt create mode 100644 paper_results/schbench/linux_cfs/224.txt create mode 100644 paper_results/schbench/linux_cfs/24.txt create mode 100644 paper_results/schbench/linux_cfs/240.txt create mode 100644 paper_results/schbench/linux_cfs/256.txt create mode 100644 paper_results/schbench/linux_cfs/32.txt create mode 100644 paper_results/schbench/linux_cfs/4.txt create mode 100644 paper_results/schbench/linux_cfs/40.txt create mode 100644 paper_results/schbench/linux_cfs/48.txt create mode 100644 paper_results/schbench/linux_cfs/56.txt create mode 100644 paper_results/schbench/linux_cfs/64.txt create mode 100644 paper_results/schbench/linux_cfs/72.txt create mode 100644 paper_results/schbench/linux_cfs/8.txt create mode 100644 paper_results/schbench/linux_cfs/80.txt create mode 100644 paper_results/schbench/linux_cfs/88.txt create mode 100644 paper_results/schbench/linux_cfs/96.txt create mode 100644 paper_results/schbench/linux_cfs/all.csv create mode 100644 paper_results/schbench/linux_fifo/112.txt create mode 100644 paper_results/schbench/linux_fifo/128.txt create mode 100644 paper_results/schbench/linux_fifo/144.txt create mode 100644 paper_results/schbench/linux_fifo/16.txt create mode 100644 paper_results/schbench/linux_fifo/160.txt create mode 100644 paper_results/schbench/linux_fifo/176.txt create mode 100644 
paper_results/schbench/linux_fifo/192.txt create mode 100644 paper_results/schbench/linux_fifo/208.txt create mode 100644 paper_results/schbench/linux_fifo/224.txt create mode 100644 paper_results/schbench/linux_fifo/24.txt create mode 100644 paper_results/schbench/linux_fifo/240.txt create mode 100644 paper_results/schbench/linux_fifo/256.txt create mode 100644 paper_results/schbench/linux_fifo/32.txt create mode 100644 paper_results/schbench/linux_fifo/4.txt create mode 100644 paper_results/schbench/linux_fifo/40.txt create mode 100644 paper_results/schbench/linux_fifo/48.txt create mode 100644 paper_results/schbench/linux_fifo/64.txt create mode 100644 paper_results/schbench/linux_fifo/72.txt create mode 100644 paper_results/schbench/linux_fifo/8.txt create mode 100644 paper_results/schbench/linux_fifo/80.txt create mode 100644 paper_results/schbench/linux_fifo/96.txt create mode 100644 paper_results/schbench/linux_fifo/all.csv create mode 100644 paper_results/schbench/linux_rr/112.txt create mode 100644 paper_results/schbench/linux_rr/128.txt create mode 100644 paper_results/schbench/linux_rr/144.txt create mode 100644 paper_results/schbench/linux_rr/16.txt create mode 100644 paper_results/schbench/linux_rr/160.txt create mode 100644 paper_results/schbench/linux_rr/176.txt create mode 100644 paper_results/schbench/linux_rr/192.txt create mode 100644 paper_results/schbench/linux_rr/208.txt create mode 100644 paper_results/schbench/linux_rr/224.txt create mode 100644 paper_results/schbench/linux_rr/24.txt create mode 100644 paper_results/schbench/linux_rr/240.txt create mode 100644 paper_results/schbench/linux_rr/256.txt create mode 100644 paper_results/schbench/linux_rr/32.txt create mode 100644 paper_results/schbench/linux_rr/4.txt create mode 100644 paper_results/schbench/linux_rr/40.txt create mode 100644 paper_results/schbench/linux_rr/48.txt create mode 100644 paper_results/schbench/linux_rr/56.txt create mode 100644 paper_results/schbench/linux_rr/64.txt create mode 100644 paper_results/schbench/linux_rr/72.txt create mode 100644 paper_results/schbench/linux_rr/8.txt create mode 100644 paper_results/schbench/linux_rr/80.txt create mode 100644 paper_results/schbench/linux_rr/88.txt create mode 100644 paper_results/schbench/linux_rr/96.txt create mode 100644 paper_results/schbench/linux_rr/all.csv create mode 100644 paper_results/schbench/skyloft_cfs50us/16.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/24.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/32.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/4.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/40.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/48.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/56.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/64.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/72.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/8.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/80.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/88.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/96.txt create mode 100644 paper_results/schbench/skyloft_cfs50us/all.csv create mode 100644 paper_results/schbench/skyloft_fifo/112.txt create mode 100644 paper_results/schbench/skyloft_fifo/128.txt create mode 100644 paper_results/schbench/skyloft_fifo/144.txt create mode 100644 paper_results/schbench/skyloft_fifo/16.txt create mode 100644 
paper_results/schbench/skyloft_fifo/160.txt create mode 100644 paper_results/schbench/skyloft_fifo/176.txt create mode 100644 paper_results/schbench/skyloft_fifo/192.txt create mode 100644 paper_results/schbench/skyloft_fifo/208.txt create mode 100644 paper_results/schbench/skyloft_fifo/224.txt create mode 100644 paper_results/schbench/skyloft_fifo/24.txt create mode 100644 paper_results/schbench/skyloft_fifo/240.txt create mode 100644 paper_results/schbench/skyloft_fifo/256.txt create mode 100644 paper_results/schbench/skyloft_fifo/32.txt create mode 100644 paper_results/schbench/skyloft_fifo/4.txt create mode 100644 paper_results/schbench/skyloft_fifo/40.txt create mode 100644 paper_results/schbench/skyloft_fifo/48.txt create mode 100644 paper_results/schbench/skyloft_fifo/64.txt create mode 100644 paper_results/schbench/skyloft_fifo/72.txt create mode 100644 paper_results/schbench/skyloft_fifo/8.txt create mode 100644 paper_results/schbench/skyloft_fifo/80.txt create mode 100644 paper_results/schbench/skyloft_fifo/96.txt create mode 100644 paper_results/schbench/skyloft_fifo/all.csv create mode 100644 paper_results/schbench/skyloft_rr1ms/112.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/128.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/144.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/16.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/160.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/176.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/192.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/208.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/224.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/24.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/240.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/32.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/4.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/40.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/48.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/64.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/72.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/8.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/80.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/96.txt create mode 100644 paper_results/schbench/skyloft_rr1ms/all.csv create mode 100644 paper_results/schbench/skyloft_rr200us/112.txt create mode 100644 paper_results/schbench/skyloft_rr200us/128.txt create mode 100644 paper_results/schbench/skyloft_rr200us/144.txt create mode 100644 paper_results/schbench/skyloft_rr200us/16.txt create mode 100644 paper_results/schbench/skyloft_rr200us/160.txt create mode 100644 paper_results/schbench/skyloft_rr200us/176.txt create mode 100644 paper_results/schbench/skyloft_rr200us/192.txt create mode 100644 paper_results/schbench/skyloft_rr200us/208.txt create mode 100644 paper_results/schbench/skyloft_rr200us/224.txt create mode 100644 paper_results/schbench/skyloft_rr200us/24.txt create mode 100644 paper_results/schbench/skyloft_rr200us/240.txt create mode 100644 paper_results/schbench/skyloft_rr200us/256.txt create mode 100644 paper_results/schbench/skyloft_rr200us/32.txt create mode 100644 paper_results/schbench/skyloft_rr200us/4.txt create mode 100644 paper_results/schbench/skyloft_rr200us/40.txt create mode 100644 paper_results/schbench/skyloft_rr200us/48.txt create mode 100644 paper_results/schbench/skyloft_rr200us/64.txt create 
mode 100644 paper_results/schbench/skyloft_rr200us/72.txt create mode 100644 paper_results/schbench/skyloft_rr200us/8.txt create mode 100644 paper_results/schbench/skyloft_rr200us/80.txt create mode 100644 paper_results/schbench/skyloft_rr200us/96.txt create mode 100644 paper_results/schbench/skyloft_rr200us/all.csv create mode 100644 paper_results/schbench/skyloft_rr50us/112.txt create mode 100644 paper_results/schbench/skyloft_rr50us/128.txt create mode 100644 paper_results/schbench/skyloft_rr50us/144.txt create mode 100644 paper_results/schbench/skyloft_rr50us/16.txt create mode 100644 paper_results/schbench/skyloft_rr50us/160.txt create mode 100644 paper_results/schbench/skyloft_rr50us/176.txt create mode 100644 paper_results/schbench/skyloft_rr50us/192.txt create mode 100644 paper_results/schbench/skyloft_rr50us/208.txt create mode 100644 paper_results/schbench/skyloft_rr50us/224.txt create mode 100644 paper_results/schbench/skyloft_rr50us/24.txt create mode 100644 paper_results/schbench/skyloft_rr50us/240.txt create mode 100644 paper_results/schbench/skyloft_rr50us/256.txt create mode 100644 paper_results/schbench/skyloft_rr50us/32.txt create mode 100644 paper_results/schbench/skyloft_rr50us/4.txt create mode 100644 paper_results/schbench/skyloft_rr50us/40.txt create mode 100644 paper_results/schbench/skyloft_rr50us/48.txt create mode 100644 paper_results/schbench/skyloft_rr50us/56.txt create mode 100644 paper_results/schbench/skyloft_rr50us/64.txt create mode 100644 paper_results/schbench/skyloft_rr50us/72.txt create mode 100644 paper_results/schbench/skyloft_rr50us/8.txt create mode 100644 paper_results/schbench/skyloft_rr50us/80.txt create mode 100644 paper_results/schbench/skyloft_rr50us/88.txt create mode 100644 paper_results/schbench/skyloft_rr50us/96.txt create mode 100644 paper_results/schbench/skyloft_rr50us/all.csv create mode 100644 paper_results/synthetic/99.5-4-0.5-10000-lcbe/cfs-be create mode 100644 paper_results/synthetic/99.5-4-0.5-10000-lcbe/cfs-lc create mode 100644 paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-20us-be create mode 100644 paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-20us-lc create mode 100644 paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-30us-be create mode 100644 paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-30us-lc create mode 100644 paper_results/synthetic/99.5-4-0.5-10000-lcbe/shinjuku-30us-be create mode 100644 paper_results/synthetic/99.5-4-0.5-10000-lcbe/shinjuku-30us-lc create mode 100644 paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-20us-be create mode 100644 paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-20us-lc create mode 100644 paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-30us-be create mode 100644 paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-30us-lc create mode 100644 paper_results/synthetic/99.5-4-0.5-10000/cfs create mode 100644 paper_results/synthetic/99.5-4-0.5-10000/ghost-100us create mode 100644 paper_results/synthetic/99.5-4-0.5-10000/ghost-10us create mode 100644 paper_results/synthetic/99.5-4-0.5-10000/ghost-20us create mode 100644 paper_results/synthetic/99.5-4-0.5-10000/ghost-30us create mode 100644 paper_results/synthetic/99.5-4-0.5-10000/shinjuku-10us create mode 100644 paper_results/synthetic/99.5-4-0.5-10000/shinjuku-30us create mode 100644 paper_results/synthetic/99.5-4-0.5-10000/shinjuku-5us create mode 100644 paper_results/synthetic/99.5-4-0.5-10000/skyloft-10us create mode 100644 paper_results/synthetic/99.5-4-0.5-10000/skyloft-20us create mode 100644 
paper_results/synthetic/99.5-4-0.5-10000/skyloft-30us create mode 100644 paper_results/synthetic/99.5-4-0.5-10000/skyloft-inf create mode 100644 params.h.in create mode 100644 requirements.txt create mode 100644 scripts/.gitignore create mode 100755 scripts/bench/schbench.sh create mode 100755 scripts/build.sh create mode 100644 scripts/cmake/rocksdb.mk create mode 100755 scripts/disable_cpufreq_scaling.sh create mode 100755 scripts/install_deps.sh create mode 100755 scripts/make_rootfs.sh create mode 100644 scripts/params/default.params create mode 100644 scripts/params/memcached.params create mode 100644 scripts/params/rocksdb-server-10us.params create mode 100644 scripts/params/rocksdb-server-20us.params create mode 100644 scripts/params/rocksdb-server-5us.params create mode 100644 scripts/params/rocksdb-server.params create mode 100644 scripts/params/schbench-cfs-50us.params create mode 100644 scripts/params/schbench-rr-1ms.params create mode 100644 scripts/params/schbench-rr-200us.params create mode 100644 scripts/params/schbench-rr-50us.params create mode 100644 scripts/params/schbench-rr.params create mode 100644 scripts/params/shinjuku.params create mode 100644 scripts/params/thread.params create mode 100644 scripts/plots/common.py create mode 100644 scripts/plots/plot_memcached.py create mode 100644 scripts/plots/plot_schbench.py create mode 100644 scripts/plots/plot_schbench2.py create mode 100644 scripts/plots/plot_synthetic.py create mode 100644 scripts/plots/requirements.txt create mode 100755 scripts/run.sh create mode 100755 scripts/run_experiments.sh create mode 100755 scripts/run_shinjuku.sh create mode 100755 scripts/run_synthetic_lc.sh create mode 100755 scripts/run_synthetic_lcbe.sh create mode 100755 scripts/setup_host.sh create mode 100644 synthetic/CMakeLists.txt create mode 100644 synthetic/antagonist/main.cc create mode 100644 synthetic/rocksdb/common.cc create mode 100644 synthetic/rocksdb/common.h create mode 100644 synthetic/rocksdb/jbsq.h create mode 100644 synthetic/rocksdb/native.cc create mode 100644 synthetic/rocksdb/random.cc create mode 100644 synthetic/rocksdb/random.h create mode 100644 synthetic/rocksdb/shinjuku.cc create mode 100644 synthetic/rocksdb/shinjuku_old.cc create mode 100644 utils/CMakeLists.txt create mode 100644 utils/bitmap.c create mode 100644 utils/include/utils/assert.h create mode 100644 utils/include/utils/atomic.h create mode 100644 utils/include/utils/bitmap.h create mode 100644 utils/include/utils/byteorder.h create mode 100644 utils/include/utils/cksum.h create mode 100644 utils/include/utils/cpu.h create mode 100644 utils/include/utils/defs.h create mode 100644 utils/include/utils/gen.h create mode 100644 utils/include/utils/hash.h create mode 100644 utils/include/utils/init.h create mode 100644 utils/include/utils/kref.h create mode 100644 utils/include/utils/list.h create mode 100644 utils/include/utils/log.h create mode 100644 utils/include/utils/lrpc.h create mode 100644 utils/include/utils/msgq.h create mode 100644 utils/include/utils/ops.h create mode 100644 utils/include/utils/queue.h create mode 100644 utils/include/utils/rbtree.h create mode 100644 utils/include/utils/shm.h create mode 100644 utils/include/utils/spinlock.h create mode 100644 utils/include/utils/syscalls.h create mode 100644 utils/include/utils/time.h create mode 100644 utils/include/utils/types.h create mode 100644 utils/include/utils/uintr.h create mode 100644 utils/list.c create mode 100644 utils/log.c create mode 100644 utils/lrpc.c create mode 
100644 utils/rbtree.c diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..6d333bf --- /dev/null +++ b/.clang-format @@ -0,0 +1,16 @@ +--- +Language: Cpp +BasedOnStyle: LLVM +IndentWidth: 4 +ColumnLimit: 100 +AllowShortBlocksOnASingleLine: Empty +# AllowShortFunctionsOnASingleLine: Inline +AllowShortLoopsOnASingleLine: true +AllowShortIfStatementsOnASingleLine: Never +AlignConsecutiveMacros: true +AlignEscapedNewlines: Left +BreakBeforeBraces: Custom +BraceWrapping: + AfterFunction: true +PointerAlignment: Right +--- diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..00f4d32 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,30 @@ +name: Build CI + +on: [push, pull_request] + +jobs: + build-libos: + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v3 + - name: Install dependencies + run: sudo apt-get install -y cmake libnuma-dev libgflags-dev zlib1g-dev libzstd-dev + - name: Cache build + uses: actions/cache@v3 + with: + path: ${{github.workspace}}/build + key: ${{ runner.os }}-build-${{ env.cache-name }} + - name: Build libos and apps + run: CMAKE_ARGS="-DROCKSDB_JOBS=1" SCHED=fifo make DPDK=0 + + + build-kmod: + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v3 + - name: Build kernel module + run: make kmod diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..944fe87 --- /dev/null +++ b/.gitignore @@ -0,0 +1,17 @@ +.vscode +*.o +*.ko +*.cmd +*.mod +*.mod.c +*.symvers +*.order +.DS_Store +build/ +main +.cache/ +__pycache__/ +compile_commands.json +!extra.symvers +include/skyloft/uapi/params.h +include/skyloft/params.h diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..112f23e --- /dev/null +++ b/.gitmodules @@ -0,0 +1,8 @@ +[submodule "apps/schbench"] + path = apps/schbench + url = git@github.com:yhtzd/schbench.git + branch = skyloft +[submodule "apps/memcached"] + path = apps/memcached + url = git@github.com:yhtzd/memcached.git + branch = skyloft diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..0f29800 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,93 @@ +cmake_minimum_required(VERSION 3.5) +project(skyloft LANGUAGES C CXX ASM) + +if(DEBUG) + set(CMAKE_BUILD_TYPE "Debug") + add_definitions(-DDEBUG) +endif() + +set(CMAKE_C_STANDARD 23) +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O3 -Wall -Wextra -Wno-unused-parameter") +if (DPDK) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") +elseif (SCHED_POLICY MATCHES "^(fifo|rr|cfs)$") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mno-sse") +endif() + +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wextra -Wall") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsign-compare -Wshadow -Wno-unused-parameter -Wno-unused-variable -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers -Wno-strict-aliasing") + +set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -T ${CMAKE_CURRENT_SOURCE_DIR}/libos/libos.ld") + +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) + +if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer") + + include(CheckCXXCompilerFlag) + CHECK_CXX_COMPILER_FLAG("-momit-leaf-frame-pointer" HAVE_OMIT_LEAF_FRAME_POINTER) + + if(HAVE_OMIT_LEAF_FRAME_POINTER) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -momit-leaf-frame-pointer") + endif() +endif() + +if(NOT USE_RTTI) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} 
-fno-rtti") +endif() + +# Hook exit in all targets +add_compile_options(-Wl,--wrap=exit) +add_link_options(-Wl,--wrap=exit) + +set(CMAKE_SCRIPTS ${CMAKE_CURRENT_SOURCE_DIR}/scripts/cmake/) + +if(STAT) + add_definitions(-DSKYLOFT_STAT) +endif() + +if(UINTR) + add_definitions(-DSKYLOFT_UINTR) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -muintr") +endif() + +if(LOG_LEVEL) + if(NOT LOG_LEVEL MATCHES "^(debug|info|notice|warn|err|crit)$") + message(FATAL_ERROR "Invalid log level: ${LOG_LEVEL}") + endif() + + string(TOUPPER ${LOG_LEVEL} LOG_LEVEL_UPPER) + add_definitions(-DLOG_LEVEL_${LOG_LEVEL_UPPER}) +endif() + +message(STATUS "Log level: ${LOG_LEVEL}") + +if(NOT CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "") + set(CMAKE_BUILD_TYPE "Release" CACHE STRING "" FORCE) +endif() + +message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") +message(STATUS "Schedule policy: ${SCHED_POLICY}") + +if(SCHED_POLICY STREQUAL "fifo") + add_definitions(-DSKYLOFT_SCHED_FIFO) +elseif(SCHED_POLICY STREQUAL "rr") + add_definitions(-DSKYLOFT_SCHED_FIFO2) +elseif(SCHED_POLICY STREQUAL "cfs") + add_definitions(-DSKYLOFT_SCHED_CFS) +elseif(SCHED_POLICY STREQUAL "sq") + add_definitions(-DSKYLOFT_SCHED_SQ) +elseif(SCHED_POLICY STREQUAL "sq_lcbe") + add_definitions(-DSKYLOFT_SCHED_SQ_LCBE) +endif() + +add_subdirectory(utils) +add_subdirectory(libos) + +if (SCHED_POLICY MATCHES "^(sq|sq_lcbe)$") + add_subdirectory(synthetic) +else() + add_subdirectory(apps) +endif() + diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..66e9465 --- /dev/null +++ b/Makefile @@ -0,0 +1,60 @@ +SIGNAL ?= +DPDK ?= 1 +TIMER ?= 1 +UINTR ?= 0 +SCHED ?= fifo +DAEMON ?= +DEBUG ?= +STAT ?= +LOG ?= info + +CC ?= gcc +CFLAGS := -Wall -O2 -D_GNU_SOURCE +CMAKE_ARGS ?= + +CMAKE_ARGS += -DSCHED_POLICY=$(SCHED) +CMAKE_ARGS += -DSIGNAL=$(SIGNAL) -DDPDK=$(DPDK) -DTIMER=$(TIMER) -DUINTR=$(UINTR) -DDAEMON=$(DAEMON) -DDEBUG=$(DEBUG) -DSTAT=$(STAT) -DLOG_LEVEL=$(LOG) +CMAKE_ARGS += -DCMAKE_INSTALL_PREFIX=install + +all: build + +build: + mkdir -p build + cd build && cmake .. $(CMAKE_ARGS) && make VERBOSE=1 -j4 + +install: build + cd build && make install + +kmod: + cd kmod && make + +insmod: + cd kmod && make insmod + +tests: + cd tests && make + +memcached: install + cd build && make memcached VERBOSE=1 + +zstd: install + cd build && make zstd VERBOSE=1 + +schbench: install + cd build && make schbench VERBOSE=1 + +rocksdb: install + cd build && make rocksdb_server VERBOSE=1 + +fmt: + @clang-format --style=file -i $(shell find utils/ libos/ apps/ tests/ experiments/ -iname '*.c' -o -iname '*.cc' -o -iname '*.h') + +clean: + make -C build clean + +distclean: clean + rm -rf build + make -C tests clean + make -C kmod clean + +.PHONY: all clean fmt kmod build tests install diff --git a/README.md b/README.md new file mode 100644 index 0000000..57966ba --- /dev/null +++ b/README.md @@ -0,0 +1,29 @@ +# Skyloft + +Skyloft: A General High-Efficient Scheduling Framework in User Space + +## Overview + +
+ +
+ +### Layout + +- `apps/`: Benchmark real-world applications +- `docs/`: Documents and images +- `synthetic/`: Benchmark c-FCFS and PS scheduling policies + - `rocksdb/`: Latency-critical application + - `antagonist/`: Batch application +- `kmod/`: Skyloft kernel module +- `libos/`: Skyloft main code + - `io/`: IO thread + - `net/`: Network stack + - `shim/`: Shim layer for POSIX APIs + - `sync/`: Synchronization primitives + - `mm/`: Memory management + - `sched/`: Schedulers +- `utils/`: Useful tools +- `scripts/`: Setup machine; run experiments +- `microbench/`: Microbenchmarks and prototypes +- `paper_results/`: Experimental results in the paper diff --git a/apps/CMakeLists.txt b/apps/CMakeLists.txt new file mode 100644 index 0000000..e9b38a2 --- /dev/null +++ b/apps/CMakeLists.txt @@ -0,0 +1,55 @@ +add_executable(hello hello.c) +target_link_libraries(hello skyloft) + +add_executable(bench bench.c) +target_link_libraries(bench skyloft) + +add_executable(bench_app_switch bench_app_switch.c) +target_link_libraries(bench_app_switch skyloft) + +add_executable(bench_pthread bench_pthread.c) +target_link_libraries(bench_pthread pthread utils) + +add_executable(hello_shim hello_shim.c) +target_link_libraries(hello_shim PRIVATE shim) +set_target_properties(hello_shim PROPERTIES LINK_FLAGS "-Wl,--wrap=main") +target_include_directories(hello_shim PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../shim/include) + +add_executable(test_timer test_timer.c) +target_link_libraries(test_timer skyloft utils) + +add_executable(test_rcu test_rcu.c) +target_link_libraries(test_rcu skyloft utils) + +add_executable(test_net test_net.c) +target_link_libraries(test_net skyloft utils) + +if(DPDK) + add_executable(fakework fakework.c) + target_link_libraries(fakework skyloft utils) + + include(${CMAKE_SCRIPTS}/rocksdb.mk) + add_custom_target( + rocksdb_server + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb_server + COMMAND make clean && make SKYLOFT_DIR=${CMAKE_CURRENT_BINARY_DIR}/../install ROCKSDB_SRC=${rocksdb_SOURCE_DIR}/ + COMMAND ln -sf ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb_server/rocksdb_server ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/rocksdb_server + ) + add_dependencies(rocksdb_server librocksdb) +endif() + +add_custom_target( + memcached + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/memcached + COMMAND ./autogen.sh + COMMAND ./configure --with-skyloft=${CMAKE_CURRENT_BINARY_DIR}/../install + COMMAND make clean && make -j + COMMAND cp memcached ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/memcached +) + +add_custom_target( + schbench + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/schbench + COMMAND make clean && make SKYLOFT_DIR=${CMAKE_CURRENT_BINARY_DIR}/../install + COMMAND cp schbench ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/schbench +) diff --git a/apps/bench.c b/apps/bench.c new file mode 100644 index 0000000..fb747a9 --- /dev/null +++ b/apps/bench.c @@ -0,0 +1,79 @@ +#include +#include + +#include +#include +#include + +#include "bench_common.h" + +#define ROUNDS 10000000 +#define ROUNDS2 10000 + +static atomic_int counter = 0; + +static void null_fn(void *) +{ + atomic_fetch_add(&counter, 1); +} + +static void thread_yield_fn(void *) +{ + for (int i = 0; i < ROUNDS / 2; ++i) sl_task_yield(); + atomic_fetch_add(&counter, 1); +} + +static void bench_spawn() +{ + for (int i = 0; i < ROUNDS; ++i) { + atomic_store(&counter, 0); + sl_task_spawn(null_fn, NULL, 0); + sl_task_yield(); + } +} + +#ifndef SKYLOFT_SCHED_SQ +static void bench_spawn2() +{ + atomic_store(&counter, 0); + for (int i = 0; i < ROUNDS2; ++i) { + 
sl_task_spawn(null_fn, NULL, 0); + } + for (int i = 0; i < ROUNDS2; ++i) { + sl_task_yield(); + } +} +#endif + +static void bench_yield() +{ + atomic_store(&counter, 0); + + sl_task_spawn(thread_yield_fn, NULL, 0); + thread_yield_fn(NULL); + + while (atomic_load(&counter) < 2) { + sl_task_yield(); + } +} + +static void bench_task_create() +{ + for (int i = 0; i < ROUNDS2; i++) task_create(null_fn, NULL); +} + +void app_main(void *arg) +{ + bench_one("yield", bench_yield, ROUNDS); + bench_one("spawn", bench_spawn, ROUNDS); +#ifndef SKYLOFT_SCHED_SQ + bench_one("spawn2", bench_spawn2, ROUNDS2); +#endif + bench_one("task_create", bench_task_create, ROUNDS2); +} + +int main(int argc, char *argv[]) +{ + printf("Skyloft micro-benchmarks\n"); + sl_libos_start(app_main, NULL); +} diff --git a/apps/bench_app_switch.c b/apps/bench_app_switch.c new file mode 100644 index 0000000..6a3ce1b --- /dev/null +++ b/apps/bench_app_switch.c @@ -0,0 +1,78 @@ +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "bench_common.h" + +#define SHM_SIZE 4096 +#define SHM_PATH "/skyloft_app_bench" + +#define NUM_APPS 2 +#define ROUNDS 1000000 + +static int shm_fd; + +void *open_shared_memory() +{ + shm_fd = shm_open(SHM_PATH, O_CREAT | O_RDWR, 0666); + if (shm_fd < 0) { + printf("shm_open failed: %d\n", errno); + return NULL; + } + + if (ftruncate(shm_fd, SHM_SIZE) < 0) { + printf("ftruncate failed: %d\n", errno); + return NULL; + } + + void *addr = mmap(NULL, SHM_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); + if (addr == MAP_FAILED) { + return NULL; + } + + return addr; +} + +void close_shared_memory(void *addr) +{ + munmap(addr, SHM_SIZE); + close(shm_fd); + shm_unlink(SHM_PATH); +} + +void bench_yield_between_apps() +{ + for (int i = 0; i < ROUNDS / NUM_APPS; ++i) { + sl_task_yield(); + } +} + +void app_main(void *arg) +{ + atomic_int *flag = open_shared_memory(); + assert(flag); + atomic_inc(flag); + + printf("Wait for other apps to start ...\n"); + while (atomic_load_acq(flag) < NUM_APPS) { + sl_task_yield(); + } + printf("All apps entered, start benchmarking ...\n"); + + bench_one("yield_between_apps", bench_yield_between_apps, ROUNDS); + + atomic_dec(flag); + close_shared_memory(flag); +} + +int main(int argc, char *argv[]) +{ + printf("Benchmark for yield between apps\n"); + sl_libos_start(app_main, NULL); +} diff --git a/apps/bench_common.h b/apps/bench_common.h new file mode 100644 index 0000000..44b333b --- /dev/null +++ b/apps/bench_common.h @@ -0,0 +1,13 @@ +#include + +#include + +static void bench_one(const char *name, void(bench_fn)(), int rounds) +{ + __nsec before = now_ns(); + bench_fn(); + __nsec after = now_ns(); + __nsec elapsed = (after - before + rounds / 2) / rounds; + + printf("%s: %ldns (%ld / %d)\n", name, elapsed, after - before, rounds); +} diff --git a/apps/bench_pthread.c b/apps/bench_pthread.c new file mode 100644 index 0000000..93a9cd1 --- /dev/null +++ b/apps/bench_pthread.c @@ -0,0 +1,43 @@ +#include +#include +#include + +#include "bench_common.h" + +#define ROUNDS 100000 + +static pthread_t thread; + +static void *null_fn(void *) +{ + return NULL; +} + +static void *thread_yield_fn(void *) +{ + for (int i = 0; i < ROUNDS / 2; ++i) sched_yield(); + return NULL; +} + +static void bench_spawn() +{ + for (int i = 0; i < ROUNDS; ++i) { + pthread_create(&thread, NULL, null_fn, NULL); + pthread_join(thread, NULL); + } +} + +static void bench_yield() +{ + pthread_create(&thread, NULL, thread_yield_fn, NULL); + thread_yield_fn(NULL); + 
pthread_join(thread, NULL); +} + +int main(int argc, char *argv[]) +{ + printf("PThread micro-benchmarks\n"); + bench_one("yield", bench_yield, ROUNDS); + bench_one("spawn", bench_spawn, ROUNDS); +} diff --git a/apps/dpdk_netperf/.gitignore b/apps/dpdk_netperf/.gitignore new file mode 100644 index 0000000..0a90f5c --- /dev/null +++ b/apps/dpdk_netperf/.gitignore @@ -0,0 +1 @@ +dpdk_netperf \ No newline at end of file diff --git a/apps/dpdk_netperf/Makefile b/apps/dpdk_netperf/Makefile new file mode 100644 index 0000000..c81461a --- /dev/null +++ b/apps/dpdk_netperf/Makefile @@ -0,0 +1,44 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +PKGCONF = pkg-config + +CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk) +LDFLAGS += $(shell $(PKGCONF) --libs libdpdk) + +# binary name +APP = dpdk_netperf + +# all source are stored in SRCS-y +SRCS-y := dpdk_netperf.c + +$(APP): $(SRCS-y) Makefile + $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) diff --git a/apps/dpdk_netperf/README.md b/apps/dpdk_netperf/README.md new file mode 100644 index 0000000..7cebc81 --- /dev/null +++ b/apps/dpdk_netperf/README.md @@ -0,0 +1,52 @@ +# Latency Benchmarks + +**Remove the Skyloft kernel module before running this benchmark!** + +First build DPDK (without driver modifications), then build +dpdk_netperf in this directory with `make clean && make`. + +## DPDK only +To run the benchmark with pure DPDK on both machines: + +On the server (IP 192.168.1.2): +``` +sudo ./build/dpdk_netperf -l2 --socket-mem=128 -- UDP_SERVER 192.168.1.2 +``` + +On the client (IP 192.168.1.3): +``` +sudo ./build/dpdk_netperf -l2 --socket-mem=128 -- UDP_CLIENT 192.168.1.3 192.168.1.2 50000 8001 10 8 50 +``` + +## Shenango spinning (IOKernel + runtime) + +To run Shenango with the server runtime thread spinning, start the +IOKernel and then in `shenango/apps/bench`: + +``` +./netbench_udp tbench.config server +``` +Then run the client as above. 
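Both the pure-DPDK and Shenango runs above assume that hugepages are already reserved (needed for `--socket-mem=128`) and that the NIC is bound to a DPDK-compatible driver. A minimal host-prep sketch is shown below; the PCI address is a placeholder and these exact commands are an editorial assumption, not part of this patch:

```
# Reserve 2 MB hugepages and bind the NIC to vfio-pci
# (replace 0000:xx:00.0 with your device's PCI address).
echo 2048 | sudo tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
sudo modprobe vfio-pci
sudo dpdk-devbind.py --bind=vfio-pci 0000:xx:00.0
```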
+ +## Shenango waking (IOKernel + runtime + wakeup) + +To run with Shenango in its default mode but no batch work, start the +IOKernel and then in `shenango/apps/bench`: +``` +./netbench_udp waking.config server +``` +Then run the client as above. + +## Shenango preempting (IOKernel + runtime + wakeup + preemption) + +To run Shenango with a batch application running concurrently, start +the IOKernel and then in `shenango/apps/bench`: +``` +./stress stress.config 100 100 sqrt +./netbench_udp waking.config server +``` + +Then run the client as above. If your server does not have 24 +hyperthreads, you will need to adjust `runtime_kthreads` in +stress.config to be 2 fewer than the number of hyperthreads on your +server. \ No newline at end of file diff --git a/apps/dpdk_netperf/dpdk_netperf.c b/apps/dpdk_netperf/dpdk_netperf.c new file mode 100644 index 0000000..0eb7260 --- /dev/null +++ b/apps/dpdk_netperf/dpdk_netperf.c @@ -0,0 +1,757 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RX_RING_SIZE 128 +#define TX_RING_SIZE 128 + +#define NUM_MBUFS 8191 +#define MBUF_CACHE_SIZE 250 +#define BURST_SIZE 32 +#define MAX_CORES 64 +#define UDP_MAX_PAYLOAD 1472 +#define MAX_SAMPLES (100 * 1000 * 1000) +#define RANDOM_US 10 + +static const struct rte_eth_conf port_conf_default = { + .rxmode = + { + .mtu = RTE_ETHER_MAX_LEN, + .offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, + .mq_mode = RTE_ETH_MQ_RX_RSS, + }, + .rx_adv_conf = + { + .rss_conf = + { + .rss_key = NULL, + .rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP, + }, + }, + .txmode = + { + .offloads = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM, + }, +}; + +uint32_t kMagic = 0x6e626368; // 'nbch' + +struct nbench_req { + uint32_t magic; + int nports; +}; + +struct nbench_resp { + uint32_t magic; + int nports; + uint16_t ports[]; +}; + +enum { + MODE_UDP_CLIENT = 0, + MODE_UDP_SERVER, +}; + +#define MAKE_IP_ADDR(a, b, c, d) \ + (((uint32_t)a << 24) | ((uint32_t)b << 16) | ((uint32_t)c << 8) | (uint32_t)d) + +static unsigned int dpdk_port = 0; +static uint8_t mode; +struct rte_mempool *rx_mbuf_pool; +struct rte_mempool *tx_mbuf_pool; +static struct rte_ether_addr my_eth; +static uint32_t my_ip; +static uint32_t server_ip; +static int seconds; +static size_t payload_len; +static unsigned int interval_us; +static unsigned int client_port; +static unsigned int server_port; +static unsigned int num_queues = 1; +struct rte_ether_addr zero_mac = {.addr_bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}; +struct rte_ether_addr broadcast_mac = {.addr_bytes = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}}; +uint16_t next_port = 50000; +static uint64_t snd_times[MAX_SAMPLES]; +static uint64_t rcv_times[MAX_SAMPLES]; +char *output_filename = NULL; + +/* dpdk_netperf.c: simple implementation of netperf on DPDK */ + +static int str_to_ip(const char *str, uint32_t *addr) +{ + uint8_t a, b, c, d; + if (sscanf(str, "%hhu.%hhu.%hhu.%hhu", &a, &b, &c, &d) != 4) { + return -EINVAL; + } + + *addr = MAKE_IP_ADDR(a, b, c, d); + return 0; +} + +static int str_to_long(const char *str, long *val) +{ + char *endptr; + + *val = strtol(str, &endptr, 10); + if (endptr == str || (*endptr != '\0' && *endptr != '\n') || + ((*val == LONG_MIN || *val == LONG_MAX) && errno == ERANGE)) + return -EINVAL; + return 0; +} + +/* + * Initializes a given port using global settings and with the RX buffers + * coming from the mbuf_pool passed as a parameter. 
+ */ +static inline int port_init(uint8_t port, struct rte_mempool *mbuf_pool, unsigned int n_queues) +{ + struct rte_eth_conf port_conf = port_conf_default; + const uint16_t rx_rings = n_queues, tx_rings = n_queues; + uint16_t nb_rxd = RX_RING_SIZE; + uint16_t nb_txd = TX_RING_SIZE; + int retval; + uint16_t q; + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf *txconf; + + printf("initializing with %u queues\n", n_queues); + + if (!rte_eth_dev_is_valid_port(port)) + return -1; + + /* Configure the Ethernet device. */ + retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf); + if (retval != 0) + return retval; + + retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd); + if (retval != 0) + return retval; + + /* Allocate and set up 1 RX queue per Ethernet port. */ + for (q = 0; q < rx_rings; q++) { + retval = + rte_eth_rx_queue_setup(port, q, nb_rxd, rte_eth_dev_socket_id(port), NULL, mbuf_pool); + if (retval < 0) + return retval; + } + + /* Enable TX offloading */ + rte_eth_dev_info_get(0, &dev_info); + txconf = &dev_info.default_txconf; + + /* Allocate and set up 1 TX queue per Ethernet port. */ + for (q = 0; q < tx_rings; q++) { + retval = rte_eth_tx_queue_setup(port, q, nb_txd, rte_eth_dev_socket_id(port), txconf); + if (retval < 0) + return retval; + } + + /* Start the Ethernet port. */ + retval = rte_eth_dev_start(port); + if (retval < 0) + return retval; + + /* Display the port MAC address. */ + rte_eth_macaddr_get(port, &my_eth); + printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 + "\n", + (unsigned)port, my_eth.addr_bytes[0], my_eth.addr_bytes[1], my_eth.addr_bytes[2], + my_eth.addr_bytes[3], my_eth.addr_bytes[4], my_eth.addr_bytes[5]); + + /* Enable RX in promiscuous mode for the Ethernet device. */ + rte_eth_promiscuous_enable(port); + + return 0; +} + +/* + * Send out an arp. + */ +static void send_arp(uint16_t op, struct rte_ether_addr dst_eth, uint32_t dst_ip) +{ + struct rte_mbuf *buf; + char *buf_ptr; + struct rte_ether_hdr *eth_hdr; + struct rte_arp_hdr *a_hdr; + int nb_tx; + + buf = rte_pktmbuf_alloc(tx_mbuf_pool); + if (buf == NULL) + printf("error allocating arp mbuf\n"); + + /* ethernet header */ + buf_ptr = rte_pktmbuf_append(buf, RTE_ETHER_HDR_LEN); + eth_hdr = (struct rte_ether_hdr *)buf_ptr; + + rte_ether_addr_copy(&my_eth, ð_hdr->src_addr); + rte_ether_addr_copy(&dst_eth, ð_hdr->dst_addr); + eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP); + + /* arp header */ + buf_ptr = rte_pktmbuf_append(buf, sizeof(struct rte_arp_hdr)); + a_hdr = (struct rte_arp_hdr *)buf_ptr; + a_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER); + a_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + a_hdr->arp_hlen = RTE_ETHER_ADDR_LEN; + a_hdr->arp_plen = 4; + a_hdr->arp_opcode = rte_cpu_to_be_16(op); + + rte_ether_addr_copy(&my_eth, &a_hdr->arp_data.arp_sha); + a_hdr->arp_data.arp_sip = rte_cpu_to_be_32(my_ip); + rte_ether_addr_copy(&dst_eth, &a_hdr->arp_data.arp_tha); + a_hdr->arp_data.arp_tip = rte_cpu_to_be_32(dst_ip); + + nb_tx = rte_eth_tx_burst(dpdk_port, 0, &buf, 1); + if (unlikely(nb_tx != 1)) { + printf("error: could not send arp packet\n"); + } +} + +/* + * Validate this ethernet header. Return true if this packet is for higher + * layers, false otherwise. 
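+ *
+ * Frames whose destination MAC is neither ours nor broadcast are
+ * rejected, ARP requests for my_ip are answered with an ARP reply
+ * right here, and only IPv4 frames are passed up to higher layers.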
+ */ +static bool check_eth_hdr(struct rte_mbuf *buf) +{ + struct rte_ether_hdr *ptr_mac_hdr; + struct rte_arp_hdr *a_hdr; + + ptr_mac_hdr = rte_pktmbuf_mtod(buf, struct rte_ether_hdr *); + if (!rte_is_same_ether_addr(&ptr_mac_hdr->dst_addr, &my_eth) && + !rte_is_broadcast_ether_addr(&ptr_mac_hdr->dst_addr)) { + /* packet not to our ethernet addr */ + return false; + } + + if (ptr_mac_hdr->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) { + /* reply to ARP if necessary */ + a_hdr = rte_pktmbuf_mtod_offset(buf, struct rte_arp_hdr *, sizeof(struct rte_ether_hdr)); + if (a_hdr->arp_opcode == rte_cpu_to_be_16(RTE_ARP_OP_REQUEST) && + a_hdr->arp_data.arp_tip == rte_cpu_to_be_32(my_ip)) + send_arp(RTE_ARP_OP_REPLY, a_hdr->arp_data.arp_sha, + rte_be_to_cpu_32(a_hdr->arp_data.arp_sip)); + return false; + } + + if (ptr_mac_hdr->ether_type != rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) + /* packet not IPv4 */ + return false; + + return true; +} + +/* + * Return true if this IP packet is to us and contains a UDP packet, + * false otherwise. + */ +static bool check_ip_hdr(struct rte_mbuf *buf) +{ + struct rte_ipv4_hdr *ipv4_hdr; + + ipv4_hdr = rte_pktmbuf_mtod_offset(buf, struct rte_ipv4_hdr *, RTE_ETHER_HDR_LEN); + if (ipv4_hdr->dst_addr != rte_cpu_to_be_32(my_ip) || ipv4_hdr->next_proto_id != IPPROTO_UDP) + return false; + + return true; +} + +/* + * Run a netperf client + */ +static void do_client(uint8_t port) +{ + uint64_t start_time, end_time, next_send_time; + struct rte_mbuf *bufs[BURST_SIZE]; + struct rte_mbuf *buf; + struct rte_ether_hdr *ptr_mac_hdr; + struct rte_arp_hdr *a_hdr; + char *buf_ptr; + struct rte_ether_hdr *eth_hdr; + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_udp_hdr *rte_udp_hdr; + uint32_t nb_tx, nb_rx, i; + uint64_t reqs = 0; + struct rte_ether_addr server_eth; + struct nbench_req *control_req; + struct nbench_resp *control_resp; + bool setup_port = false; + uint64_t interval_cycles, time_received; + uint32_t max_random_cycles; + + /* Verify that we have enough space for all the datapoints */ + uint32_t samples = seconds / ((float)interval_us / (1000 * 1000)); + if (samples > MAX_SAMPLES) + rte_exit(EXIT_FAILURE, "Too many samples: %d\n", samples); + + /* + * Check that the port is on the same NUMA node as the polling thread + * for best performance. + */ + if (rte_eth_dev_socket_id(port) > 0 && rte_eth_dev_socket_id(port) != (int)rte_socket_id()) + printf( + "WARNING, port %u (socket %d) is on remote NUMA node to polling thread (socket %d).\n\t" + "Performance will not be optimal.\n", + port, rte_eth_dev_socket_id(port), rte_socket_id()); + + printf("\nCore %u running in client mode. 
[Ctrl+C to quit]\n", rte_lcore_id()); + + /* get the mac address of the server via ARP */ + while (true) { + send_arp(RTE_ARP_OP_REQUEST, broadcast_mac, server_ip); + sleep(1); + + nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE); + if (nb_rx == 0) { + continue; + } + + for (i = 0; i < nb_rx; i++) { + buf = bufs[i]; + + ptr_mac_hdr = rte_pktmbuf_mtod(buf, struct rte_ether_hdr *); + if (!rte_is_same_ether_addr(&ptr_mac_hdr->dst_addr, &my_eth)) { + /* packet not to our ethernet addr */ + continue; + } + + if (ptr_mac_hdr->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) { + /* this is an ARP */ + a_hdr = rte_pktmbuf_mtod_offset(buf, struct rte_arp_hdr *, + sizeof(struct rte_ether_hdr)); + if (a_hdr->arp_opcode == rte_cpu_to_be_16(RTE_ARP_OP_REPLY) && + rte_is_same_ether_addr(&a_hdr->arp_data.arp_tha, &my_eth) && + a_hdr->arp_data.arp_tip == rte_cpu_to_be_32(my_ip)) { + /* got a response from server! */ + rte_ether_addr_copy(&a_hdr->arp_data.arp_sha, &server_eth); + goto got_mac; + } + } + } + } + printf("got_mac\n"); +got_mac: + + /* randomize inter-arrival times by up to RANDOM_US */ + srand(rte_get_timer_cycles()); + max_random_cycles = (float)RANDOM_US / (1000 * 1000) * rte_get_timer_hz(); + + /* run for specified amount of time */ + start_time = rte_get_timer_cycles(); + interval_cycles = (float)interval_us / (1000 * 1000) * rte_get_timer_hz(); + next_send_time = start_time; + while (rte_get_timer_cycles() < start_time + seconds * rte_get_timer_hz()) { + buf = rte_pktmbuf_alloc(tx_mbuf_pool); + if (buf == NULL) + printf("error allocating tx mbuf\n"); + + /* ethernet header */ + buf_ptr = rte_pktmbuf_append(buf, RTE_ETHER_HDR_LEN); + eth_hdr = (struct rte_ether_hdr *)buf_ptr; + + rte_ether_addr_copy(&my_eth, ð_hdr->src_addr); + rte_ether_addr_copy(&server_eth, ð_hdr->dst_addr); + eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + + /* IPv4 header */ + buf_ptr = rte_pktmbuf_append(buf, sizeof(struct rte_ipv4_hdr)); + ipv4_hdr = (struct rte_ipv4_hdr *)buf_ptr; + ipv4_hdr->version_ihl = 0x45; + ipv4_hdr->type_of_service = 0; + ipv4_hdr->total_length = rte_cpu_to_be_16(sizeof(struct rte_ipv4_hdr) + + sizeof(struct rte_udp_hdr) + payload_len); + ipv4_hdr->packet_id = 0; + ipv4_hdr->fragment_offset = 0; + ipv4_hdr->time_to_live = 64; + ipv4_hdr->next_proto_id = IPPROTO_UDP; + ipv4_hdr->hdr_checksum = 0; + ipv4_hdr->src_addr = rte_cpu_to_be_32(my_ip); + ipv4_hdr->dst_addr = rte_cpu_to_be_32(server_ip); + + /* UDP header + data */ + buf_ptr = rte_pktmbuf_append(buf, sizeof(struct rte_udp_hdr) + payload_len); + rte_udp_hdr = (struct rte_udp_hdr *)buf_ptr; + rte_udp_hdr->src_port = rte_cpu_to_be_16(client_port); + rte_udp_hdr->dst_port = rte_cpu_to_be_16(server_port); + rte_udp_hdr->dgram_len = rte_cpu_to_be_16(sizeof(struct rte_udp_hdr) + payload_len); + rte_udp_hdr->dgram_cksum = 0; + memset(buf_ptr + sizeof(struct rte_udp_hdr), 0xAB, payload_len); + + /* control data in case our server is running netbench_udp */ + control_req = (struct nbench_req *)(buf_ptr + sizeof(struct rte_udp_hdr)); + control_req->magic = kMagic; + control_req->nports = 1; + + buf->l2_len = RTE_ETHER_HDR_LEN; + buf->l3_len = sizeof(struct rte_ipv4_hdr); + buf->ol_flags = RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4; + + /* send packet */ + snd_times[reqs] = rte_get_timer_cycles(); + nb_tx = rte_eth_tx_burst(port, 0, &buf, 1); + + if (unlikely(nb_tx != 1)) { + printf("error: could not send packet\n"); + } + + nb_rx = 0; + while (true) { + nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE); + time_received = 
rte_get_timer_cycles(); + if (nb_rx == 0) + continue; + + for (i = 0; i < nb_rx; i++) { + buf = bufs[i]; + + if (!check_eth_hdr(buf)) + goto no_match; + + /* this packet is IPv4, check IP header */ + if (!check_ip_hdr(buf)) + goto no_match; + + /* check UDP header */ + rte_udp_hdr = rte_pktmbuf_mtod_offset( + buf, struct rte_udp_hdr *, RTE_ETHER_HDR_LEN + sizeof(struct rte_ipv4_hdr)); + if (rte_udp_hdr->src_port != rte_cpu_to_be_16(server_port) || + rte_udp_hdr->dst_port != rte_cpu_to_be_16(client_port)) + goto no_match; + + if (!setup_port && rte_udp_hdr->dgram_len != + rte_cpu_to_be_16(sizeof(struct rte_udp_hdr) + payload_len)) { + /* use port specified by netbench_udp server */ + control_resp = + rte_pktmbuf_mtod_offset(buf, struct nbench_resp *, + RTE_ETHER_HDR_LEN + sizeof(struct rte_ipv4_hdr) + + sizeof(struct rte_udp_hdr)); + if (control_resp->nports != 1) + goto no_match; + server_port = control_resp->ports[0]; + + /* reset start time so we don't include control message RTT */ + start_time = rte_get_timer_cycles(); + setup_port = true; + } + + /* packet matches */ + rte_pktmbuf_free(buf); + goto found_match; + + no_match: + /* packet isn't what we're looking for, free it and rx again */ + rte_pktmbuf_free(buf); + } + } + found_match: + rcv_times[reqs++] = time_received; + next_send_time += + (interval_cycles + (rand() % max_random_cycles) - max_random_cycles * 0.5); + while (rte_get_timer_cycles() < next_send_time) { + break; + /* spin until time for next packet */ + } + } + end_time = rte_get_timer_cycles(); + + /* add up total cycles across all RTTs, skip first and last 10% */ + uint64_t total_cycles = 0; + uint64_t included_samples = 0; + for (i = reqs * 0.1; i < reqs * 0.9; i++) { + total_cycles += rcv_times[i] - snd_times[i]; + included_samples++; + } + + printf("ran for %f seconds, sent %" PRIu64 " packets\n", + (float)(end_time - start_time) / rte_get_timer_hz(), reqs); + printf("client reqs/s: %f\n", (float)(reqs * rte_get_timer_hz()) / (end_time - start_time)); + printf("mean latency (us): %f\n", + (float)total_cycles * 1000 * 1000 / (included_samples * rte_get_timer_hz())); + + if (output_filename != NULL) { + /* print all samples to output file */ + FILE *outfile = fopen(output_filename, "w"); + fprintf(outfile, "index,time_us\n"); + for (i = reqs * 0.1; i < reqs * 0.9; i++) { + float time_us = + ((float)(rcv_times[i] - snd_times[i]) * 1000 * 1000) / rte_get_timer_hz(); + fprintf(outfile, "%d,%f\n", i, time_us); + } + fclose(outfile); + } +} + +/* + * Run a netperf server + */ +static int do_server(void *arg) +{ + uint8_t port = dpdk_port; + uint8_t queue = (uint64_t)arg; + struct rte_mbuf *rx_bufs[BURST_SIZE]; + struct rte_mbuf *tx_bufs[BURST_SIZE]; + struct rte_mbuf *buf; + uint16_t nb_rx, n_to_tx, nb_tx, i, j, q; + struct rte_ether_hdr *ptr_mac_hdr; + struct rte_ether_addr src_addr; + struct rte_ipv4_hdr *ptr_ipv4_hdr; + uint32_t src_ip_addr; + uint16_t tmp_port; + struct nbench_req *control_req; + struct nbench_resp *control_resp; + + printf("on server core with lcore_id: %d, queue: %d\n", rte_lcore_id(), queue); + + /* + * Check that the port is on the same NUMA node as the polling thread + * for best performance. + */ + if (rte_eth_dev_socket_id(port) > 0 && rte_eth_dev_socket_id(port) != (int)rte_socket_id()) + printf( + "WARNING, port %u (socket %d) is on remote NUMA node to polling thread (socket %d).\n\t" + "Performance will not be optimal.\n", + port, rte_eth_dev_socket_id(port), rte_socket_id()); + + printf("\nCore %u running in server mode. 
[Ctrl+C to quit]\n", rte_lcore_id()); + + /* Run until the application is quit or killed. */ + for (;;) { + for (q = 0; q < num_queues; q++) { + + /* receive packets */ + nb_rx = rte_eth_rx_burst(port, q, rx_bufs, BURST_SIZE); + + if (nb_rx == 0) + continue; + + n_to_tx = 0; + for (i = 0; i < nb_rx; i++) { + buf = rx_bufs[i]; + + if (!check_eth_hdr(buf)) + goto free_buf; + + /* this packet is IPv4, check IP header */ + if (!check_ip_hdr(buf)) + goto free_buf; + + /* swap src and dst ether addresses */ + ptr_mac_hdr = rte_pktmbuf_mtod(buf, struct rte_ether_hdr *); + rte_ether_addr_copy(&ptr_mac_hdr->src_addr, &src_addr); + rte_ether_addr_copy(&ptr_mac_hdr->dst_addr, &ptr_mac_hdr->src_addr); + rte_ether_addr_copy(&src_addr, &ptr_mac_hdr->dst_addr); + + /* swap src and dst IP addresses */ + ptr_ipv4_hdr = + rte_pktmbuf_mtod_offset(buf, struct rte_ipv4_hdr *, RTE_ETHER_HDR_LEN); + src_ip_addr = ptr_ipv4_hdr->src_addr; + ptr_ipv4_hdr->src_addr = ptr_ipv4_hdr->dst_addr; + ptr_ipv4_hdr->dst_addr = src_ip_addr; + + /* swap UDP ports */ + struct rte_udp_hdr *rte_udp_hdr; + rte_udp_hdr = rte_pktmbuf_mtod_offset( + buf, struct rte_udp_hdr *, RTE_ETHER_HDR_LEN + sizeof(struct rte_ipv4_hdr)); + tmp_port = rte_udp_hdr->src_port; + rte_udp_hdr->src_port = rte_udp_hdr->dst_port; + rte_udp_hdr->dst_port = tmp_port; + + /* check if this is a control message and we need to reply with + * ports */ + control_req = rte_pktmbuf_mtod_offset( + buf, struct nbench_req *, + RTE_ETHER_HDR_LEN + sizeof(struct rte_ipv4_hdr) + sizeof(struct rte_udp_hdr)); + if (control_req->magic == kMagic) { + rte_pktmbuf_append(buf, sizeof(struct nbench_resp) + + sizeof(uint16_t) * control_req->nports - + sizeof(struct nbench_req)); + control_resp = (struct nbench_resp *)control_req; + + /* add ports to response */ + for (j = 0; j < control_req->nports; j++) { + /* simple port allocation */ + control_resp->ports[j] = rte_cpu_to_be_16(next_port++); + } + + /* adjust lengths in UDP and IPv4 headers */ + payload_len = + sizeof(struct nbench_resp) + sizeof(uint16_t) * control_req->nports; + rte_udp_hdr->dgram_len = + rte_cpu_to_be_16(sizeof(struct rte_udp_hdr) + payload_len); + ptr_ipv4_hdr->total_length = rte_cpu_to_be_16( + sizeof(struct rte_ipv4_hdr) + sizeof(struct rte_udp_hdr) + payload_len); + + /* enable computation of IPv4 checksum in hardware */ + ptr_ipv4_hdr->hdr_checksum = 0; + buf->l2_len = RTE_ETHER_HDR_LEN; + buf->l3_len = sizeof(struct rte_ipv4_hdr); + buf->ol_flags = RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4; + } + + tx_bufs[n_to_tx++] = buf; + continue; + + free_buf: + /* packet wasn't sent, free it */ + rte_pktmbuf_free(buf); + } + + /* transmit packets */ + nb_tx = rte_eth_tx_burst(port, q, tx_bufs, n_to_tx); + + if (nb_tx != n_to_tx) + printf("error: could not transmit all packets: %d %d\n", n_to_tx, nb_tx); + } + } + + return 0; +} + +/* + * Initialize dpdk. + */ +static int dpdk_init(int argc, char *argv[]) +{ + int args_parsed; + + /* Initialize the Environment Abstraction Layer (EAL). */ + args_parsed = rte_eal_init(argc, argv); + if (args_parsed < 0) + rte_exit(EXIT_FAILURE, "Error with EAL initialization\n"); + + /* Check that there is a port to send/receive on. */ + if (!rte_eth_dev_is_valid_port(0)) + rte_exit(EXIT_FAILURE, "Error: no available ports\n"); + + /* Creates a new mempool in memory to hold the mbufs. 
*/ + rx_mbuf_pool = rte_pktmbuf_pool_create("MBUF_RX_POOL", NUM_MBUFS, MBUF_CACHE_SIZE, 0, + RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); + + if (rx_mbuf_pool == NULL) + rte_exit(EXIT_FAILURE, "Cannot create rx mbuf pool\n"); + + /* Creates a new mempool in memory to hold the mbufs. */ + tx_mbuf_pool = rte_pktmbuf_pool_create("MBUF_TX_POOL", NUM_MBUFS, MBUF_CACHE_SIZE, 0, + RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); + + if (tx_mbuf_pool == NULL) + rte_exit(EXIT_FAILURE, "Cannot create tx mbuf pool\n"); + + return args_parsed; +} + +static int parse_netperf_args(int argc, char *argv[]) +{ + long tmp; + + /* argv[0] is still the program name */ + if (argc < 3) { + printf("not enough arguments left: %d\n", argc); + return -EINVAL; + } + + str_to_ip(argv[2], &my_ip); + + if (!strcmp(argv[1], "UDP_CLIENT")) { + mode = MODE_UDP_CLIENT; + argc -= 3; + if (argc < 6) { + printf("not enough arguments left: %d\n", argc); + return -EINVAL; + } + str_to_ip(argv[3], &server_ip); + if (sscanf(argv[4], "%u", &client_port) != 1) + return -EINVAL; + if (sscanf(argv[5], "%u", &server_port) != 1) + return -EINVAL; + str_to_long(argv[6], &tmp); + seconds = tmp; + str_to_long(argv[7], &tmp); + payload_len = tmp; + str_to_long(argv[8], &tmp); + interval_us = tmp; + if (argc >= 7) { + /* long output file name */ + output_filename = argv[9]; + } + } else if (!strcmp(argv[1], "UDP_SERVER")) { + mode = MODE_UDP_SERVER; + argc -= 3; + if (argc >= 1) { + if (sscanf(argv[3], "%u", &num_queues) != 1) + return -EINVAL; + } + } else { + printf("invalid mode '%s'\n", argv[1]); + return -EINVAL; + } + + return 0; +} + +static void test_rx(void) +{ + uint8_t port = dpdk_port; + struct rte_mbuf *rx_bufs[BURST_SIZE]; + uint16_t nb_rx; + struct rte_ether_hdr *ptr_mac_hdr; + + nb_rx = rte_eth_rx_burst(port, 0, rx_bufs, BURST_SIZE); + + for (int i = 0; i < nb_rx; i++) { + struct rte_mbuf *buf = rx_bufs[i]; + ptr_mac_hdr = rte_pktmbuf_mtod(buf, struct rte_ether_hdr *); + printf("RX: %d\n", buf->data_len); + for (int j = 0; j < buf->data_len; j++) printf(" %02x", *((uint8_t *)ptr_mac_hdr + j)); + printf("\n"); + rte_pktmbuf_free(buf); + } +} + +/* + * The main function, which does initialization and calls the per-lcore + * functions. + */ +int main(int argc, char *argv[]) +{ + int args_parsed, res, lcore_id; + uint64_t i; + + // /* Initialize dpdk. */ + args_parsed = dpdk_init(argc, argv); + + /* initialize our arguments */ + argc -= args_parsed; + argv += args_parsed; + res = parse_netperf_args(argc, argv); + if (res < 0) + return 0; + + /* initialize port */ + if (mode == MODE_UDP_CLIENT && rte_lcore_count() > 1) + printf("\nWARNING: Too many lcores enabled. 
Only 1 used.\n"); + if (port_init(dpdk_port, rx_mbuf_pool, num_queues) != 0) + rte_exit(EXIT_FAILURE, "Cannot init port %" PRIu8 "\n", dpdk_port); + + // for (;;) test_rx(); + + if (mode == MODE_UDP_CLIENT) + do_client(dpdk_port); + else { + i = 0; + RTE_LCORE_FOREACH_WORKER(lcore_id) + rte_eal_remote_launch(do_server, (void *)i++, lcore_id); + do_server((void *)i); + } + + return 0; +} diff --git a/apps/fakework.c b/apps/fakework.c new file mode 100644 index 0000000..2c01656 --- /dev/null +++ b/apps/fakework.c @@ -0,0 +1,85 @@ +#include +#include + +#include +#include +#include +#include +#include +#include + +static struct netaddr listen_addr; + +static void __attribute__((noinline)) fake_work(unsigned int nloops) +{ + for (unsigned int i = 0; i++ < nloops;) { + asm volatile("nop"); + } +} + +// Shenango loadgen message format +struct payload { + uint64_t work_iterations; + uint64_t index; +}; + +extern __thread struct task* __curr; + +static void HandleRequest(udp_spawn_data_t *d) +{ + unsigned int niters = 0; + + if (d->len == sizeof(struct payload)) { + struct payload *p = (struct payload *)d->buf; + niters = ntoh64(p->work_iterations) * CPU_FREQ_MHZ / 1000; + } else { + panic("invalid message len %lu", d->len); + } + + // printf("niters: %d\n", niters); + // uint64_t a = now_tsc(); + + __curr->allow_preempt = true; + fake_work(niters); + __curr->allow_preempt = false; + // uint64_t b = now_tsc(); + // printf(" %ld\n", b - a); + + if (udp_respond(d->buf, d->len, d) != (ssize_t)d->len) + panic("bad write"); + udp_spawn_data_release(d->release_data); +} + +static void app_main(void *arg) +{ + udp_spawner_t *s; + waitgroup_t w; + + int ret = udp_create_spawner(listen_addr, HandleRequest, &s); + if (ret) + panic("ret %d", ret); + + waitgroup_init(&w); + waitgroup_add(&w, 1); + waitgroup_wait(&w); +} + +int main(int argc, char *argv[]) +{ + int ret; + + if (argc != 2) { + printf("usage: %s [portno]\n", argv[0]); + return -EINVAL; + } + + listen_addr.port = atoi(argv[1]); + + ret = sl_libos_start(app_main, NULL); + if (ret) { + printf("failed to start runtime\n"); + return ret; + } + + return 0; +} diff --git a/apps/hello.c b/apps/hello.c new file mode 100644 index 0000000..2d752c7 --- /dev/null +++ b/apps/hello.c @@ -0,0 +1,42 @@ +#include +#include + +#include +#include +#include + +void do_work(void *arg) +{ + int cpu_id = sl_current_cpu_id(); + assert(cpu_id == (int)(uintptr_t)arg); + printf("Hello from CPU %d\n", cpu_id); + + for (int i = 0; i < 10; i++) { + + __nsec before = now_ns(); + __nsec dur_ns = 1000 * NSEC_PER_MSEC; + + printf("running (%d, %d): iter=%d\n", sl_current_app_id(), sl_current_task_id(), i); + for (;;) { + if (now_ns() - before > dur_ns) + break; + } + + sl_task_yield(); + } +} + +void app_main(void *arg) +{ + printf("%d\n", USED_CPUS); + for (size_t i = 1; i < USED_CPUS; i++) { + sl_task_spawn_oncpu(i, do_work, (void *)i, 0); + } + do_work((void *)0); +} + +int main(int argc, char *argv[]) +{ + printf("Hello, world!\n"); + sl_libos_start(app_main, (void *)2333); +} diff --git a/apps/hello_shim.c b/apps/hello_shim.c new file mode 100644 index 0000000..32f5242 --- /dev/null +++ b/apps/hello_shim.c @@ -0,0 +1,32 @@ +#include +#include +#include + +pthread_mutex_t mutex; +volatile int count; + +void *do_work(void *arg) +{ + printf("Hello, world!! 
%ld\n", (long)arg); + for (int i = 0; i < 10; i++) { + sl_pthread_mutex_lock(&mutex); + count++; + sl_pthread_mutex_unlock(&mutex); + } + return (void *)1; +} + +int main(int argc, char *argv[]) +{ + pthread_t worker1, worker2; + + printf("Hello, world!\n"); + sl_pthread_mutex_init(&mutex, NULL); + sl_pthread_create(&worker1, NULL, do_work, (void *)1); + sl_pthread_create(&worker2, NULL, do_work, (void *)2); + sl_pthread_join(worker1, NULL); + sl_pthread_join(worker2, NULL); + printf("Count %d\n", count); + fflush(stdout); + exit(0); +} diff --git a/apps/memcached b/apps/memcached new file mode 160000 index 0000000..34dcf95 --- /dev/null +++ b/apps/memcached @@ -0,0 +1 @@ +Subproject commit 34dcf958b93e0e662b087f54a63ffc49648c6e57 diff --git a/apps/schbench b/apps/schbench new file mode 160000 index 0000000..79a4c95 --- /dev/null +++ b/apps/schbench @@ -0,0 +1 @@ +Subproject commit 79a4c9524777897c6941af2873166f1ac3446f04 diff --git a/apps/test_net.c b/apps/test_net.c new file mode 100644 index 0000000..01142d5 --- /dev/null +++ b/apps/test_net.c @@ -0,0 +1,50 @@ +#include +#include + +#include +#include +#include +#include +#include + +static int str_to_ip(const char *str, uint32_t *addr) +{ + uint8_t a, b, c, d; + if (sscanf(str, "%hhu.%hhu.%hhu.%hhu", &a, &b, &c, &d) != 4) { + return -EINVAL; + } + + *addr = MAKE_IP_ADDR(a, b, c, d); + return 0; +} + +static int str_to_mac(const char *str, struct eth_addr *addr) +{ + size_t i; + static const char *fmts[] = {"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", + "%hhx%hhx%hhx%hhx%hhx%hhx"}; + + for (i = 0; i < ARRAY_SIZE(fmts); i++) { + if (sscanf(str, fmts[i], &addr->addr[0], &addr->addr[1], &addr->addr[2], &addr->addr[3], + &addr->addr[4], &addr->addr[5]) == 6) { + return 0; + } + } + return -EINVAL; +} + +static void entry(void *arg) +{ + uint32_t addr; + str_to_ip("192.168.1.3", &addr); + + for (;;) { + printf("Hello\n"); + timer_sleep(USEC_PER_SEC); + } +} + +int main() +{ + sl_libos_start(entry, NULL); +} diff --git a/apps/test_rcu.c b/apps/test_rcu.c new file mode 100644 index 0000000..9b59c32 --- /dev/null +++ b/apps/test_rcu.c @@ -0,0 +1,131 @@ +/* + * test_runtime_rcu.c - tests RCU + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define N 1000000 +#define NTHREADS 100 +#define FIRST_VAL 0x1000000 +#define SECOND_VAL 0x2000000 + +static waitgroup_t release_wg; + +struct test_obj { + int foo; + struct rcu_head rcu; +}; + +static __rcu struct test_obj *test_ptr; + +static void test_release(struct rcu_head *head) +{ + struct test_obj *o = container_of(head, struct test_obj, rcu); + log_info("test_release"); + free(o); + waitgroup_done(&release_wg); +} + +static void read_handler(void *arg) +{ + bool ptr_swapped = false; + struct test_obj *o; + int i; + + for (i = 0; i < N; i++) { + rcu_read_lock(); + o = rcu_dereference(test_ptr); + if (o->foo == SECOND_VAL) + ptr_swapped = true; + BUG_ON(o->foo != (ptr_swapped ? 
SECOND_VAL : FIRST_VAL)); + rcu_read_unlock(); + sl_task_yield(); + } + waitgroup_t *wg_parent = (waitgroup_t *)arg; + waitgroup_done(wg_parent); +} + +static void spawn_rcu_readers(waitgroup_t *wg, int readers) +{ + int ret, i; + + log_info("creating %d threads to read an RCU object.", readers); + + waitgroup_add(wg, readers); + for (i = 0; i < readers; i++) { + ret = sl_task_spawn(read_handler, wg, 0); + BUG_ON(ret); + } + + sl_task_yield(); +} + +static void main_handler(void *arg) +{ + struct test_obj *o, *o2; + waitgroup_t wg; + + log_info("started main_handler() thread"); + waitgroup_init(&release_wg); + waitgroup_add(&release_wg, 1); + waitgroup_init(&wg); + + o = malloc(sizeof(*o)); + BUG_ON(!o); + o->foo = FIRST_VAL; + RCU_INIT_POINTER(test_ptr, o); + + /* test rcu_free() */ + log_info("testing rcu_free()..."); + spawn_rcu_readers(&wg, NTHREADS); + o2 = malloc(sizeof(*o)); + o2->foo = SECOND_VAL; + rcu_assign_pointer(test_ptr, o2); + rcu_free(&o->rcu, test_release); + waitgroup_wait(&wg); + log_info("readers finished."); + waitgroup_wait(&release_wg); + log_info("RCU release finished."); + + free(o2); + o = malloc(sizeof(*o)); + BUG_ON(!o); + o->foo = FIRST_VAL; + RCU_INIT_POINTER(test_ptr, o); + + /* test synchronize_rcu() */ + log_info("testing synchronize_rcu()..."); + spawn_rcu_readers(&wg, NTHREADS); + o2 = malloc(sizeof(*o)); + o2->foo = SECOND_VAL; + rcu_assign_pointer(test_ptr, o2); + synchronize_rcu(); + o->foo = FIRST_VAL; + free(o); + waitgroup_wait(&wg); + log_info("readers finished."); + + exit(0); +} + +int main(int argc, char *argv[]) +{ + int ret; + + ret = sl_libos_start(main_handler, NULL); + if (ret) { + printf("failed to start runtime\n"); + return ret; + } + + return 0; +} diff --git a/apps/test_timer.c b/apps/test_timer.c new file mode 100644 index 0000000..00968a0 --- /dev/null +++ b/apps/test_timer.c @@ -0,0 +1,61 @@ +/* + * test_timer.c - tests task timer + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#define WORKERS 2 +#define N 1000 + +static void work_handler(void *arg) +{ + waitgroup_t *wg_parent = (waitgroup_t *)arg; + int i; + + for (i = 0; i < N; i++) { + timer_sleep(2000); + } + + waitgroup_done(wg_parent); +} + +static void main_handler(void *arg) +{ + waitgroup_t wg; + double timeouts_per_second; + uint64_t start_us; + int i, ret; + + waitgroup_init(&wg); + waitgroup_add(&wg, WORKERS); + start_us = now_us(); + for (i = 1; i < WORKERS + 1; i++) { + ret = sl_task_spawn_oncpu(i, work_handler, (void *)&wg, 0); + BUG_ON(ret); + } + + waitgroup_wait(&wg); + timeouts_per_second = (double)(WORKERS * N) / ((now_us() - start_us) * 0.000001); + printf("handled %f timeouts / second\n", timeouts_per_second); +} + +int main(int argc, char *argv[]) +{ + int ret = 0; + + ret = sl_libos_start(main_handler, NULL); + if (ret) { + printf("failed to start libos: %d\n", ret); + return ret; + } + + return 0; +} diff --git a/docs/imgs/overview.drawio b/docs/imgs/overview.drawio new file mode 100644 index 0000000..9b74560 --- /dev/null +++ b/docs/imgs/overview.drawio @@ -0,0 +1,796 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
[796 lines of draw.io XML omitted]
diff --git a/docs/imgs/overview.jpg b/docs/imgs/overview.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0a225fa1cf417dc983b3f7a97494a3a201ebb910
GIT binary patch
literal 114085
[binary JPEG data omitted]
zXnjBziwO*)QQa~btg2AHH9W|#jk2sbTp7M{cfGSbtn3?yX{lkZ$%Ssx$xv33cD_Ht9r5rCZA+_!ZM^GUJX(AzBY(Nme zPU033?3um#wT*o)5N7mak$E5AF)YTkR4%Zk#|(j;((2A!Q%7NuRljo&1>8WcX8IrEGf|{I^gxlQ!FWW%Oq-@MoJdHWVBd z04w)!w~iZDr}>2#_O{4YTNCD&V}VOT#kr-|9|)t1z3-FXPSVsi42{N@!5n}ta!0xq zIF-?(k08|*-49Z_UygC*Mwg!n3~ToE6Ns-;-0yT& zosdv!r}ho~H_YtKm}ld|ShWMOp=uxN$>={tCm0A)s?G$b*XK)oqn_Db zf5dFJU7Xhm*}8C-YyHp(xLb&;?<$VwYN_aXSkqCoe1KslpIH;ev}%BhxC)YoDp>ZR z1$dqC?S^JEOk7+hJ&hGJD<)&6#c?fD3#X@czm9?qGE9M|6p;T&C4j+nYkaZ3pS4W8 z3ARrr;LvM|fTI2h5`VxGe_uTLLs-oFAx?1Fn?PzlcLc=NU8$h4@_VY^S}xSwiC*Wo;948{IIP*VY$?CAY&?KWYP-6|=)LVN zpnUbx_mb=9RU)`T{j-jp_iTJEx+6+%-4mkV0NbZCDEDV{2iyzlo)tX1MT5Nkz%3qHjjaX8=x059f6?^)y`Q;^Gw2ha7mmFD^616hCShKT|g_Gvhbc0AGzkvZU1Rc z|H#t7(YB3#7Q%JrB0B@-Z!Dn*k`}RMnNV$>eiNn*qrXCZPrgaEVM+7B@Q(lrSg<2T z2+%Dca7+%aM*KwqZo&@bf@?7_cU@Xn2VW_c(Gtj}deglnALm`i3Ae+4{(FXqMpLT2 zBV&H+wfRtXH7xa9{4VQ{Uky;62(%V7z-mwQ1Zg5N)^wX@CF3VFPOjUzJEWceBaCk& z-vVx}oHXZ$8Qa{mQ{*-nAy$0!;YGXBdrzOVG{z!;hC9cwwmu9#r~^3*3z!f_3awK7Wmn5!ePQ@;{1 z%Cs65ULB>}_Ldtp|CQFTYv$swmB^JiOwkl|x~|-%$B_JL&e(@^VAwg3q=?F*%~l6{4PM^-SMBTB)}vi=<1y&i zlcHGf?!&UDjroTzc+u_&Wbx2>+QQD65dc1#^#Y{$UxOWkQt&2YxI^%K8^#i42^WG2 zi$A*KRMr}w(1p!urEf+l`3KqeDV%kl;TOv%V}c4UuCk0_7FLY3nUrN^-Tz@UfV6+r z$^+|;ml$B8{{qoo9;ib82iST)>1`QUgw-WT?yaCRCq!GCIC9 zW~nIk(=4#o@Rr%%WbBgLSSq_LD}&{uh4uk`S}5cpW!QG>lb`C4bwg1^w-xOi zBy5<90+#}qJeT6!j=;36X~s#KQHuV1Zz$ChEAzl$MnD8WV!maTw8wI|mvwH&hWUD> ze$W>O*E3u+2R^cl3ETC(e23vV&T@d!3siNWr2r;fOH1f=ZQ>U!3~)Wt5E+NiYV#>8 zw*)*dylfO5XpeDw)|-Gct5I#6=9!h#>wGhgc>jRu)69&tsIjOuz_#RsvWMCwx*R5k zv28fQkc!;UYh<)GfS*b*a~xx@Xxgvge7mu;p#fegE;fts{r$n1w08jE>TLS$!QJ3< zDw3=#M>?A2p>Uf{)9hkC*ME;@D8R%fO4!WpFd=oiN(^gUdK1-FmN~e@s@a)zXa!jC zS>l-tt;fZRk3kl7@J#Zj%hL=OFdu9_vT&*7H;Q|Bj^06Q3ferHg*tsmzuuoYB3T72 zE4U-Z>dg9Fg$QhY{~Cm|7AYGE5CQ$;)zR$Ij;qvPg>e zU_jL!1-FCQe37 zTXwsZSE@6W?$~g5HjsKcoVjKFK%~zWaaRsLO3ndS?t{Eckj{LI4ujQPs68)Zr`!lg zE&vZvShN-03J-T!VQxjJ0%BteeT8jeLba@*6%x7z6!w%=cNJ4hNN?I=cQ^2wc0Av> zT> z4=am($a7FY-vXz^jH~dBJo3Nj?=p@By8q~a=dPMF)FE@!ufx`UU=HeOwHj4_daC=A zu(l%G(+>C@osT^;Ud()nlBteecmVBu4J?9`WaO-yG-5G^&ISqAQv+>zVa>W&c`1C~k^ z|NA!*e>7rpKre3g>L4aEcfg7_RI8&kmOfXM9eZ#M`4b>Ab30ljA=M-H)1xq^!-$UXx!?zYP_ z>l>Pj?8gv%8)O8!W_uwn`wOv#LS6|bXt5%`>A;m)k}}ooddSz&nNOiD4Vig zgAib@%oeACQ?VSw|-xfm@yfPFDmSINvo4^2lW$ z8=G6p1u~-H82$QzTI3S@mc}8BY*oVulogOu^jmdJp5Y3viC)5~$KRY&5#@QAgdeo~ zbp>|1Ea2uhBOkss*J^hhX=2ZDRx+R=m);d0^EXWDEqFsNt^3t?Sg9bS<=n6ZW3$Fq ziWy^eeQPF8cl2ty)JFlqr?HAj) zPJKCCH#Dbn5?bb|^F6JOrj?UWnlKg774>ZdY8wnaPgu0Q-x2(CxpD!YHtX=5cCLO% zyZg}_Wx#>g;&9!|yaz(sLrUG-k>2e6y3HSJjP7@ zsHn4rB?EKwByY`D(4%rSEG+~4OxJg#SzmFBZzP3ZY@GC1Kdrq0&WTHy4?h02Inog4 zQ@rnk70>wRM;+)rW!56?_q`|$mo0!RJvsj@)v-0WXE?ZlCBuCNg(g(Xl9fQRYr)ej zVCyi78}<0H(}0-_>hnb)OVDCi?-R8L)L3PjLoB72){pz6?sviaHH1X~bi9GE^##1+ zK|l%rzvNbO%t$Qw|51IEs=@W;xat5bKRu#f!kVgvt5+?8JBPJINXpOO={92_tI(Qb zJFwp;z7MY}r{FGd^u{R~I+de@ zWPbW*(BlwcppFSJ&gI-rws>BS|D7ZI!eB!*YrfiJSgA8ujoN8QC_oXVWfB~JJG1k~ zxbthR5xA}i-EZ4Y`hnY-2MiHEHO0zC*;z%NflECvR_Hqk>yYw0&e#C^TiPA6%?-$e zic}qnO(C&==EL*t3D;%HtdO95S|1VJ!d0D&)vVQvptX7rI?ao`3IyIbgCGsakYQFE zQXt~s_*<1?vXB-ZWSais`MRdZO30jX9Qf~sRzJ@-S>_m^O-g?0Z&9H84U~8$%$I`4 zMQqImdO-jmA*Om#L2ZK*d&n<4b?G;wl}M!`sj>2rmVuVWf?(pN6XRh(ER~nW+Z%iT zY#1DdDgTIkjPbayc0aY!0CS&pDa%kQNCJ)ZO(w8vdq1!p3aJLaY1i3FPruBl1m1}U zByB3F&3zMBNNRNo8oWY_^tskG(^eZg>?$6rh>d<2R{L|_{Ct#Kl*JL?%M58PC3;gY z-90FTE&hZ@?SY;L`kE&ft=T&X+JGcajA90KcJP>lT{A3aXO#|Idd0K=5i+|QRJl2i z(EJXnyCXLh5Nw@lCShgNJdiEIFRYTYqN*@T2Vfo;3-?NArh!AJG^MTA$_Fu$RXign z7p+8%WVqGiXo_(B`=Ll1yfir@1g>3=xVFehHV%7QksEjMiSPQg0nRG>L)ns!+?cYe z{G)fBcb;j}UxQ#@P9hU*1@XTCNbe|E4)?&e08v;;tonl$CHk=^%An1<+Yli0SaA{} 
zMu4qYZ|3!|JV||1in9q(6l2*`LSe1yOfI_$$9p{3$nM~;MuV%-oHGO4s!5_EEMj_l zk!H2#dsbUJr@?Q!SHsY*)5Qy<4+xGR<7O8c49<{5<}xM0O!rU_;^U~@eIErfZk*QR z{?xch-Ees=orggTWzU9OvLInzkt@)3%PQmCjnh2CO&`kfCP|Gc3*rCopn0nHZ6szS z16cB{cG7=~0jpY!dVs$d66Eul)qrNUL*FU3S78yuj+lx>&;39SPhP=a&2U14E5-up z>@#o>yMjcIk^>*o9{bJ!qwLEK8GnU)9zZsN#A3pVhI(GBYYyu^O}lDQQd0sH;^TY$ zJw=RHRcL-7PYts_k4~z0q6?0WwC(SpZLaN_<4)uX@%bNh zavtz5r7ry1I2iGK)1B!WDP_%Xn28%-)SGm7!IS4ejd|0c=Sy5*cZMJw5U|D^gsfR% zm{d?bVboTQ)hnBEqH!!HtyT;0G}E%areYr3UpJ8iIgMA z;)yo?s=ttrfQ)g=C~E@#^gBL7bZe%;Bkqc(t+eBbf8!I+tk@h73laWMS2ZJk;{z@ZmC>}z_`*-BOX;%AM8nV6v${VBE142jQ;KQ?!KgEbJZV+T zUZ^f4QE$q8;m%5|C#Ev-AWe^V{t-%Ui(*;vHRmlz{&Eub*V_X53%$ASV8KqfYtfA7XKO?ieU-y$qPsC zDgrHj1_RtIw}a@NN;IJZXI7sDwdb1@-BsuS8~E5Hf9sHzdOrNT4L7|3=1UyYjF5{qxt#E&cb zyzXy1uHDlYp|d;uxSw$;m2WptHQVYw)O#Y?*x^EYUIr_Ka}#uzxpxtua5Q9M=)mqe zV|A4kl&vMfy=}~mNNObO*l$`OQ6R}!SWVdnpqsUPwneY`zR8*ew$hZ*RjJt4r` zVy$xqg9YOBc?FcFUr%0w0)k|Nk*Y2(4sPj$R?h-#fNrFYgazu{9N74o6($oP3dSJ8 zuf=*vBRWQvI(l?iiQ+xCR!_!kbaG2B8A!@YxiYZeh`PYnWnYOkaCbXsOf5o9WUQ2; zWsf;N@KDO;>T2bHKPhkuo8K^cmP)XD9T^^V@@dQK2IEm>WzWeIykrw-S@@ z1AWI$_NuHtt@#jCbkoy8T>?t1OS%%lbQ0q%5K9_%tX7ii{Ims}XE@CU&bR*>%q@LI z?GjLpl|F9jv%33uZjD`ILxR++vvgbk<5Pk9#lxjpd!d-a7bDWQ4CA-e{9aPm{7?;e zR`S1va~kgS;II+mTdJvtVEI{XwbUw-QIlLOmm)(_w9{4M5~(Mt|| zqW7PjGZ~P24Z8bZgG!OCA#N9WC#%hSFRs?BD>l=@v;Z&MeQEC5M$%+Hu5-pfX;uC1 z&zI|K2QFWkv1g{OFyHm;1ahvrmTnJ@K7^${h*GYX^#;p123lv>C}pl1E1KV#cB4m;71rBP zWQ6GJ?wcr0h7)8+H9IlV;R0D@V)a3{{`Gks4G5Xn10nM{c08^YpU5?epVe^JGOi#W z?#d>akiMVkKFBz(z-4l5d6fH;x$0rPJ;KS-vH%Q=zM%NI-yRQh$Ho)WXL!40TV^iQ z_n+T>1;3p?4)|sIAg0Gu(T+ho^r_}7nj;;a{>s_erpwgt)}DL(N^c!Dzc7&vB8-Ox z%cfl*Gf=88-KJm3Skr*y-a7vP1$nmI%m~fX2++mm5muo|TzZ!>2)WZ$ToDxZTMz{p ze;UeCp0UIVxt~gf>{rhtP&OXAgn47%=}R`$uh#eeu04SW5~pWlX+~OCSdH*x}?j5@2KOe&^pCHsrCvB z%N&%L%>|N#k|p{b^a9!YmqnMWwJrd!Y0LF>TkuMr3;neIG0aNgc}7+rbTA$}IP;uf zBSw3r6Ya)ifY2S~wZejD5-~^rZ8uUWGZGF07?f>;S_^Z*RT$GRQ1lxB&rOnT35HL3 z3a)`SGAbe)^GGrsrX#>Itp?4FbB$FRZ*m`h7+XF1uffS?a=ho-Vzi($$hnVUC(FWh z5!`*2;jQU)-mjdiLAd=6c6(!#RaCMa;@e80fMRbI{DBi46@0Ip8%b~8T{zfTQ{*Tu zI(5ZeDaAkQo=L&M6*Mq6v*)1))CJuV^mHECl!v#wG`|!}Ib?(KNzl^>p5Lys=N3#WOQa?a-0Q%-kx4_r{-Cb5N*@}Y0 z5=`~ol9hz5T7PiwQkfeWdxjaVpt96PBL_#1>;7OaVQkFk;;e9-X80*gE;)L4*nWp{ z3fgzaQ2F11&8Rj~C+yoPoJU88=N44+V|LEYWsb8PVTJ4B+eMOE3H-~ad zsT=aH)@^$B`{C6hI$li$GbHP=k9lA!qBqd4f>+NcM+OoD;jx)`{c053KFq`o%~e}{ z5$3&lmHvtmL11Q4ACw$~gLs_*KGL0aqrso=lo2Wjt6jI`QL>h+BCjbrJdD~cg*1J~I5!GZI2 zy&%GxqgsC6+nVx;o#Yv!J{{;fR(nMPA0eX~YAPPpEk1AMr#hhV{%`L4!%E6S>SxCK zmUG{jdZqnB2rh^=Wd1V+JrPW|RZ$6e#I{=VBV*a1=YC`74)kSbG!kknd4iWn6I zAwpC{h6t$@Btl43gfN<_6)F^>j3Sc^0l|;}0aB%i5FikQkWfV?c^-xElnmWB|M#4= z-gVC2XRZC|eAypZizNY{;l8i?8h*nZ=R*NUWPsIojEn~R1s{jo4{DaLnvLwcO9M90 z(m5>^*t0=0RR;_v{em=x&P8_~XEoQ3U^U%6;SAZgb0q?3jp^Pn-HqSl z*?28YE6x~!;aSCxYCwJu&G}A*GoZJyArI*al8OWt*ljGag1T`ESjgi;O#OufVC$vw zC`T@Rb%*tsOqENK>f2tfPx)YoV@`HVajBQ_)r^F8Ket^ne2}TpF@$_kh8lE`VG@B< z6l%~@Q3*1r5)E(^eP>M+;&x_Q5(bqD3UYHF8NHT^8B25ZTr#c%aD2KY&Atz7W7?O{ zxW2KJ>KjHndhhf2nf^%KaZOeYe}Ki|*+$^=#cM>%@KeEIjPtKM6;~IF+RMVTsMyXHqj7St*^a;F91B2}idsWsKAmS9Nv`Wi8dc8C^9y4F<1Jm1;9orHetS z9J}$snA9A{FiY30QxKcU>yV{T5?y5u37kZ($8*<0VVQK`JrdpJ)tD2uWk{kK^lryj z0bc_!5*ndhfJgEQRnFa92Her=zqOYmT}RsqP1B74C49U&M+<_$V3Zxt(-HxFga@-O zP-AxctFln~j)v|UOZFzJppCaj%$K3xSy9St$>{25S{1)?iT`Tt1=2Z;>3DiCV=)pz zll;Yh;&ue`z`fknbl$jemvH^5=B>1*kW+F5-MK5_(Q_mVc;Xg~yx;TjJL~y9fz|O{ zDD2g^UxQbGiQAoi`a6aJKah>=qRdpnqS4L|pC7^aX6fo(l~4k|P+QXr6#`j%18Pi@ z0afH4x$^LH>OMi61Km;-o_#sFXW{6b$>4$nqcnbpw?Z4Pivzk_7_bcpI5R49l+{wP zf%;jbSkj!zol?ZZCmxqmtH+LnD)=?%={i}b!T0YXDf;A0W`fEnZ@?l+<@bEDq3Ev4 
zrT`JONpb*3leIb2_e5IW#0~cJS(qX7zKuJi+K(`yAUQK(nsyNym$T!zKp%S>LOT6* zReGb0vS}j27*|*5vGd@OYvJJ_2k5bU<@1DF{kt$S2y66h+djrxoH;Nlb-|OnZu2)V zg8eLjWq653i$q(Y&ev?30*ZzNit<=69s2?)lA+>r%;A5vZt-@U4{$^XiRVeqE`7}&BKZZ^$zcZ-ag(Xe;vMT94w;C{F&`=__&-gb6sf>YRs!Br5RKq zlqaePfM$xZU2r7arhjupK5Ww*{q9}g@4_O7R#TJnH4Xh%-V5Fjk&SOYua;*=uCeBG zq)VNO8KcpPdmx0cl5sZFI&T;yz$D|BT=~3}2R=zy_c)l%gH#9El`Lq541{u*++ZHM zu|7vEn@^LrS?LOmdq=kQ$ZKkNyxwxI>rSi4Qi3geE2G(nKbp#4jUQ$4wXp;lS{KQj zl2^2|Q;ec=Wi91gu*2$_6ST-LOuh4Bc#RpO-IcUiC%#5zq8- z(!9O<>`?o{Fvq%aZ`Jo^fw4szsg+TCrAT?a?RiKhN@b64bv98DAq8MZ{m>mTJIEB! zwUMA)D&D)23vsIaLu&!kgHzN>FdFisR)I{qHC-WTuCf~*9piqA#zwp-lf`YY}cjQNk7`V;H3jl!hCRYK?&5LmXV!35nW6( zK;OSBI0_r7tnlkc0K~a(p0J5=Vd90$Gpe}~-R7_;jsxloc7xU!4r&^O9J9*zb2?j- zq~*MpE0D8q(Y#@uW1<@A8d^BQh7Nw|IHe2&o<7xX2&QCmv9WHjFivTHia8e4f=P8; z=lgT|8SdAJYyF7`&U0^=v~AI9}BUnphek?4jX+a(vq3hIC`CMrEE+$t#i(W(d<^{yr5~i|*e##ExgIrQL)UPW4e}e#+zEey75o`5YmKg+u|w8p?DK%Eq( zV9l%v>*Stb4n&lsOf}kgH1vW3cRKB-k1cP{rAvs?`*!mHnum)?8&=(ZOSC4Q8!_QjzkJz`LH*?j=R@Rh0!h*4l^ zh;$`SPqhzmDnb&z=(&;rQ3?9mf=6BDbpFt9aYv#&$AQvW`G<=~h|N2HZN46}?duT( z?zM)zgY2`pw+@&H{I@amo5ytC*}B4!wMmh(63V99N0&y->!1(w^Et&aPY##;zP`vX z>1OeS!Z@w}k!jDsd(yeV;f@vWiAQ#rnL=VFc6&aN%4mt!PLjoHzaLLI7H);9L3hdbYYbWi$lU-9Y5^5i}L3o$G(`M04D zfDs5ko<9nP4amhZs@Nw^XvkQ}CCKXM+zAQ%P4j0R`)1gQ+c{NnXL@LxWApqTe`vV9 zF~;vG*{|oqTYH|=U40K9%GL&Agj>T#8;LUUjBZ7!F}@8gS=sd*F;dWx65vGa#|Q?E z{EX)=>c?`H9`O3=jx=BCP&qV`;#BXE{&aI+yf7n@j15@;k`jEu$TtvsnxH&{M78-E zR?FBMCh(|HPG1Ci!$d}dJ*Qw;&(rhQp-@Nvyj7g}M@_D--@b3&cH8e<`Ie2#hBfV8 zVnYTn3(h)O4&YIIA2l+pNB$SycnC&~$#}T;Gmv3EaWI zuxADQO7Sk0{!R*cvXR|U)4v-jU7JDaz-mV!dok^Pd#iq3gIgB@82kJyS6(vauiuAD z=`3KizuhACaO*orc}2yh3w^a&AATR=1(o z;`RbvF{Z2{6|aSJR^j{((aiyrsYK>BFhFGsNRobW^og>9DMycX5XDTWc>kJ4Xgrm+ zt1L7cR}$=3TCqP?8^nnMH9G<&G5JCLfOZ%T5@b5eY?kAhztTLQ3IR#~HI7+99cwP~ z?6)OndpFlbDI6J0RRDnQRcUG=+Vj)AzG^&Mk8~9-^ z3pN5|E(0Jk;N48p9Uj&ls?byI0-Rek(|A?qrz2Zlz3f(*<_Ak6#6KK7dEPC&=kBf} zqw+$e8v;C2Q;i`amRY398yI@XkZbJ0&trt&iJ_Ti-`g}l3zleD5oi0wMCdaosA#Ie zPUO0eIGGul-{Z2+{OAbVMT;I3g5Jmv?YeFFD*|{?pydGI=eKLS1N@wR0OIbBZ88r! 
zVcP%^lM(m~uqj!IU`)r-2g1$2_?B8#fFjyWj#}?GS#iz&ew1m^XPFpa0bao<^;X7A z;}RaGg|GTjG8dYxr2;sX|JP5R{ZIbw|EEvmInu-!dYiNscD{uimK|nat!sUu)BCHl z2R#k!_un&Fw|H3kpm@>U3T{SwNJFPR%mH` zr?p;d+cI|-t?T|apdW+VPCe3Qwi2d4*5=4aTd53hh@Uu@mDXbF`lI-&g~-n7(AKS& zQ_uUn@7$7Z;dbfD#vdnkSfbvN){W3b(yAL-WT>l6x|$oQJWbmTwH}zjucuq9cD#aW z^I$L3THo80Cjg#MAz&Ghb2hU*;q~zsw?)%;yIWHgkgUfRQyb^r!XIM%pHKg{=k%Xv@?D_%Ic^VmwS;jS zfb1n*XJEUgy@f;6F@bbF=o+7SOZDpK29&9OY>acO*3tHnfkhU7{`cv^3!4_lr$vy7 zkiHT0Qt4O~wQtTUYE0T@sb_xpl}NQO{3AqaVKa?aE_n0Mh~Ma|MwcD@-Y2Y6e6E;F zlv}36n9B+J@Bir@XbveFE>LJ@G61jhLsBA12`;sBFDUEKaB{B<*vy!$L7gf#_R8md>sF=KVqu${`vPm2E06)91;+CLwO>3 zkJgfs=a&_lH5Hl+C6Un_Vi(SfQf)E<=*iFZnkIEJKd>VV%;^JWwg{o|5k6^Z>BE5< zpf{z_ML$P3VpN+0gpO{`icgFx?`dXu+^3a7w6B`+1SRZ3c7CsU4|?}8u=V+_NEoGA zD^*^S6h8SUs-XwB9>ru=kFZvr{$sYXk}2E&Zt%Im#?P6@{&_UWzc2+_BbwEb8n#kz zD(73g4ef^|jMZqazwOP%tc*r=y#_yV_?Z}dAD|E5|k&RZE(Es{ZYYRw1jgYOoisEfiNsMJ0 zENv$|As)Lk)EXTxO~q}eVS)VFSK%osXv4S)qT|aS7a;gH5-Jk2T6kSHwAWbkHZ0&Y zKaQYyjy;-t zuX3E1{@5pcHHC>Yr|SOakAd1F#MIRl@*4+1;YwDQ#9WZ0_fF+dO18X03&}eSOBR|k z%j3#jMhi3Hu*mPQTZ4>}dF^E(l){vJeKl|gr&Y)p$#@Q~NO>R>Z-70e+zU!aY0V6e zB~R&eQux&^U8CGo=TyIKC;4O*#7hNT{8fCw$#f1dN{%4L;)ySNrOgH|vVS6tddjV4 z3#(!tvj(Md)(ipGl`&Ge1mzLU8-FyIChQ4bY_AL&osG}L%mBIjw0<+2?rD{CVW=^# z`Z}z01hR;mo7L?oMG^qLV7ei&pZTn#Ku{no>$rYR`>E+4=rbVwGS$3c7y$)tLh^O$ z2YhJPfLK7Pig4WKzm2lOLpCc}y84Uo`-YUMxYlcaZ8AR7e+6!XnTflp-sl6EbGSVF z#@X9f-`+pa@E2kLNKO54S3AX+>NHk2?INSx;%nE0;aeBXbP-WLy>G+-X)Z>@CxHq| z!31xM=rhmo(~^pl<#XgL_LQuo%(GQAGK{cJ6Hr|R!YA*K#9n*m8lxitNm84!cROpy zcS@0;AxkJx>Qjk7snf;Y{Z>2F&jZvYT#IK@UnUIcn|OT@u@^L5r&P9at*5}fm+PX} zbQGdZZ5#X=zasaDDJf*v7ry}wA8@IVa7GU_Z?SA011D}7jNu4)< zH=^02>D8F1vuWml>QXda0LEHL{2H&aJY+r4R;x~1SXxiZf_bQZyJ2g|lrY+^P6(dS zacQZ8gzZ1wh0NU;RL6H?msCFM_pC^QPHx4YcV0-9DVMA;uC zb5d}mEs10wFwr|A}XI0>EQLCGrn=iDr{h!{Md;bK`2!Da5r1v2^Lvm?Z7r z)zGUO>Te-#aehAdtG_Ie@X4S54PyzG_=SOoeq<}1x+M62waz&x?ma;jYqryZ)f_G! zsH_FZ!~*Hkt?1)}^a$RNM-f1`hB!$juUhD9`c;^GvbX(xWawNSViXIByv5wZ7Pn^# z8tOSBD^*|sCdIh>Ld{E&@dtk!MGn%fBm;Fh%H10FV2WY~peDG;tcb_Rd>~ckzw~sJ z`1gpbxA7n89eG0lnupfIQX=KYvW}mdw<#V9HKw`mOb|?e{LzM8h_h6kh5#ox9`KWR zwzMp4vCGrqBG&mSWr{|%Vw9TVwYn81$i+=Wr?yDTDpwq_TDTN~=dQPq) zj-IbN*Dy0I?Zfmt;-BgmIZIHnhPoa=lWgIN1l1ZyK_sC3`6(*Ka|wbvQkl}0(QxIl z?BzUJYVSvyw`UN>!2tGm?tllbq{uXl+sf3 zI>$_1!igoF?z7^O@&MQ>4Xm{pbllt!k?0fA_|uRl49s_;jFQ-A8qz2G-F1nvI1lGA zFS%P0G5**%!hu~CMNm-_S_c?bI!qUV7oEaJN5&9V0pjzZ^3;Ehy&+d9a-=#}UEN-r zF5Sw(FDC=!xVYI7kom*H!)O*om}EKFV9^iK4nU+NK7hq5 z@jzgDWIggK-!+eV=PhbAmEE4R3`hi`)(4lBg*u+ii7WRXp#nDQR#@*1mF(H??N--c z8Wx%GqW7{rdGfbqUm7bw*D;4Q7^blprh`+eMqRAUx_N_NRi)`R+gAS$eU907ed0#+ zA@x)5ru#@_KqK`0u)Mvu>-KT4Is^84XKu!~93^@sZz(|8A-l#f#;P#&Z(KZt9}8#U zlSR$^6*Uzgn6M3PE4DTlP@_9eLir1L!wA-LYS1yL9;PJjZiHq)s<^e6DEM`tkn_s| zz4EwQ3e!eR|eCG&lK zVkRKD>S1?FR@P=7KRGlkEUtZF7qT;cMmuZY`|HwbCOD9Ya!e~wLWu!d5s6$Yr9I85 z$2LXD*HInq#xI>85sh$qg?4>Fz{R}xi1}e4h%q#~&gEI~bt$=`c$jRQ>h;S&;jfA> zoi{R0#p=PBF;@X0Zh@~`BTdZN1;zyQf3?P3+e}SH`2v$4yXVzc7@KMCVlY%-Z4SvX zzT>>&r#=&cRK{Ng+&=(=^>$IPv43{1|NF_zvOhxO)kkmP_CQff*LhA|7|BlH9ay?d zKnTj|@`l3-)08KMmQH2(=&Q02V~?~sjY0n|M28HCPKf8~X@0S*(&FDf#-xP@LK2em zhtBpV>8wpfX>?n^|)b0GWC&Ee}y2dURv8eBfUDjOaEf=rG#D&-#0U!=!hG-QfhAkD+)5eC|V;RnkHBZocH9e%IfXp<*L-gB9drhmqp}K&(tYpG4gN{Ms%9rjN(Z4x5;~y6K z<;|>XEp(+3617M1ERwri61s?fC&XT&-1>C5rl9T~(X0Mpp8C1QQq#}Yy^1x$PnRfn zAW@16et-D;33&mIJdu8d89xJ<=+Xce? 
zJ$`yW*u@P4#-pdKtJS?M;5bA|!{RHZc*d1b&QrNm*LQM5+ZcZ3;W0jn2*`gB^0hhY zinNWss+Z;>E9nTMxcAUVd7m1}+Sz+v62DDW{=&0Sk&!?pj=FYS9@RE$z-ZYunnl`( ztZ42Doyw!ewc5OvGm>#Qs0*YKTAym8ff8RC#h>2@V_4V#86V|SaPRTYFW!gVANJ>z zXRjLm@KWcIDr6}?Mr=l-;<#ZD@jv--d)GZj0pdmrfG}tN6Lm^}fkvogRUe2*mur!F zz*%fB3fo0kW7*O@#i zIRHkw^#j~Kfm2!e0`?erMB}aLe`@!u&U8=bp)W@SLWU7P5RX!sA@li% z#~dI=lc~c}y3yKc-tPHnafd&0P?wz=4Fm|B^Qaf7A|mGLut%iK<43CRPGgQ_^5jbn z=;Z}clKC7)o0Vm|5s5>Xek79)x}NQ^)7vd=UL*iCK`_XGHQfi04#KNi?nAHM@JJw& zrvswn#krKIEHN>5Yb))!G3+ss9Za6Roxf20q4PJl~f$aHE*DBRTfM6;M@>V1u zs8aNLRjfJ#=Y&LJP2fD4S9wN;aX{CzPsLF;fdF^3hZmqgr<@a&)4KyAw;9rR_s|X% zj|@(HrT+nPY;;vuDt`1y&OsQRGEb0zl9Vychisa{@&{SW5hLUfD7sNXbdqI-)z`FR z$heOfhf?i_jyu|f6z&#f3GQfA()}e-n@%1)$R7=w-UX$MGK7p|Y0U&2)XqqeqUrnV zk|JWHNq7?;m@W&W=&OLd18VaS!s{B%XsKAI-NV&`W@X+9^OkzMBFfAKBSv*%yY2ZQ z3%va>D#`BLi6_#~r3Tl3|ME@;?;PN&)<(%lshZu4OMu_x3g*%G9-HNZ4INt9z+JLC_F2fR~wTTI!|h$7=p~Y>9FeeF*$;T9HFXVsQew^o|Td-ND9; z0S`5jY(686cIe4eZRHa3!7GEjZgJ#(P>Jb5sb^n)Pj2t-8e*mXgYUNAyt5{)W_xSs zvWYpb4KN{r_&tdHWi*$kXkFbSblOjLvDi<`+uZbG)Gw^@)x1AffAd~l^m^q6lHKDx zAg7>{SmW&8I)a9~;^w7#pWyRd0Ba7!$5VD>3XB)u#!#yLLKE4{L@^REnV z{hb@kJNF*zeW5kIr`!`buixf+UehZ|zy`?(`orf#4ovkcB&fuiSbur!dNQlZrBtw< zee>GRhU0alPu-Jy%C0P+>wQw$R&Gls^Oz<@?ZSv1%!pTQ4~#lc$Kk?9`2|#Z|HsH> znYuA)zMp&c7Op+AW$eb!t9SkEq7^acZ5{)!Xo?O`%STcG)$T4>�+|+J^A>@)6|W zQBF6f>?m@g-`@B{vGeo|-?W+0+Isui+UuRpO?7_=OcL|K(nKL&n@09G_RdMyB3&&x zyLuyh*7wcM-ebqxEvu z`cEUq!K068E+ypXE7?zNy}!4sDJk(kFDUx`^QMH`ZOuNnwGTG+Wv^(Zo55a>wd-WD zQAgq3X%Xj%p^MtZ0i_`s+V(@=-1%4QM2@`wQref7_q-(U?NOVSRl9(S?r$B{#*~i; zfMBdjppR?TF2FhX7R+FP;?OD8)VzlO4Sw`D5~h`f>^~UIX!h6G@YSlNS!2LMwdK6# z79I`u`Q2@d=7YRVE7CgPA;njp1OI9{djl|u`q@f8VMKeXtHMsXj0D;v(2oagr2|qq z`ZnsdMT>N(L$lTn(!>M^2HlG^LyT>a56L@&e$V#wb6a81gKjFp7M{}9boVW)Nx$NulZ=1xY-9&PojpDSF1XZ0>3UL| zsWKFufv-GOu9-rw1Q=Ogv5=$>3ip4t(%98q)56i&6(l49wKjxzFh%arCebi)4k%!BVH$J`n`Yo@VA+Sd!(Pur)2tC7;J7oSHl4p( zwW{nBdlT2wA`xeU>V6WAic?|aCE%1s zaxo^zMrA7<-4of-wTjxKXK)_s#bKaFZ;L`A9hlftC?+s_AfVbRKFtP|xaLxs=X)MZ zp9>X3=WoU?Ojm*iRL*vS;Uoi?M8>+dT~m3BG+3dWJbM?90>d7((8|K&MRyH9b)xJ% zHQK|CZaL9_%P0(SWVD{GH^WqmBBpdr!Y(-xov3+QnxL9ARGTqlnpsL0v)AwrsG^}F zAp>|P>DrB|ofN;k1g$uL8%cIV$45w+i>w*;XvJWWU?Z~7Y>ZDX1C54~j8rtjbh3+M*ND`q;yT^bUrRYK`)c z=8aT0lAR>o5J9-bLxBdWfC6AaYiWDONXa}~fW)=ybaE4`&>^r`aSOi6-(=f(MbFix z7mTdaosXRA0UenLXLC;xo?5zuF6rUgK%(YoQZql^mlDU*CHn>?0ov>dfo%NlRe+w& zY6WA7(qM@_9}*p|>mKV-ttks}#3x3oEspomtjGnVb0G~MD+9<_uZHYte_EjYso zz~#({o8u<{d?IZZY;+FJNTh#*5D5^4c~y02tc)SI8Xg89Ez|4mzPotYW{>5u0m3Lr zym~y9;~d_kG_m_2ITtyx%l5{z@_XR ze?1*++rt1HFf=On6tSXND)^IKzzdl$T{KEbpj#qVqwK+lK-Qnp++~_yNgb0p#PinCz5}ZX z`1QGB4-mW1$tsgc0hO!V%5tZcCOjaQYOyN~ecq6e zS9pH-_P1K98!rI!)5=n_R;|>d?ScRt5hx4&+>Z|;4VHi!ubdI=fd+E?U1qev9<<#5 zRXzfc-N*ERmNt?c&Q!9#a8=L1N zdyCpI65X@N2Z%o^ogVmAMXE+TASP&W)nH~WD)&%{iX!B`ay2ak=C|-oLhEvtfxbTh zC}tC6C1_OeuLxtQ3qVJ!{7BnWxHyn^?0(hw3UV1w3*SGdtJa|+G^?t z1P!eiWj7b^c<0$t=>`=o0Yc5NbR*W;dT{{s9?0tJxL$teN#wrjH*h^|JKq;x9=WQ{ z=z52Z3lPJ+q|Ve}CpAW}ju10AH5%0(%V$cjtTfsTTT8k4A(>(@5oj2ZoyNgU1z}HND{AdBTU>F@qIt zq~(#I43Gzc9tGV-wF3H_2eTA^BJoNW+Kq<3h~1(ZOHv|TSN%t+Y8NDK!f)Asf!S`h z7mm?%0az^@m85dHzMgaQ9nD_-j1j_;kb(XQT6vi2Iwo#s>lF!60F>4YveL)W>e~8z zl`EW+b#sgl{HL)&koP$?ADkMEH$GYsyI`YxDn|udU$cQq5jwIYKnE!XAYP5;t*jTF z2#J|xZL{-H7A;)~Hq^->`CaLHnhvJ51G8d;cAy$bvbc-IX&h@1^0UzX%z3bihbTt& zIgtn!6}F(LCQ&w<>OveHN96vBVQz2AF{i7S~ML9`1@@^jFzY*`k(w)BNpX8 z2zyi0hM8UvLR7bdlOgrb-@d!(oWK^@QO5c(T%x`Fn~bp@>&HoC)ICC9G=E(whF zgxifBa%P;$34Ybeu3}CLpghGZq#5oQ4HANm>AsJR@bwnScp`_86upo_U#HrqzJ=2# zn>hp5AaOGv&D%dUVg>WtS!-OC7gQLiOc+PE8Vq&Fq$F7LzTqbT)iS36u0bAoYys`1 zP6264BcoX&6f{ODPN*{@E0`J}NBK+@0PEb2q};~qa?m65)M0# 
z=0q+AEe>|lg?C|GR~bV~+FV#5jssX`U5YOCl9bqub+*nfoP0sOemGepq0Xf_hcy9e zzE*`0ZG;$Ubdx~O_zp>*ZiN&HcDRV;aQbA zLm#&f$!pVKjaQA&7|4fz43;>LCzsDv=!GtNl~JA!O?Tp1H&PW!$sLo3f9u1)^h&???5pa!5#B{bXf_P{|&*cdqx z%=4pMp<#X^_xhOCPkupd8XNJ&?$YVJK9!HW?9$D7(ne~^N#c^sdSSZKliSd6{{x=I zt|6#4cq=TC53r~mNPP-`nJv9nS{c<|k<_`LZA2q0QMBkjjaCO7F8>%+-}nmtm`2V+ zQUu}*Nx@&$!&6a`qmOb&$ct&6{T1aN&!&vLjlSGsfE*Qa6Y{tXoVS z(SOGes_W-re4Fz~2%WM_wGU=YPe6W}Evk*kLvmy}@_>T)Fn~thGunP&TDaWGo7Bqz z_UzsQU{1WSukOaeM4%>a9uiMm0yy9Y;FUD`hh`a!y3M$PWPgHgi%7tboJ!oht`AANe4s6tJ(sA#N%yo$@vLmySt0E1-qp0|4Xs% zzo)_8Z_%8O@z1FbfkgA`{7?N4rIQD#j+0+ZoPs6fq_LjzFgtSoolV+qh00y2AHNIm zO7~v=UH9-KLN^$Sjb*f<($Kn`S;WF3fx9e^Divh1@6D==0Df%))mnJArUCYy$}wIt zXy}_M$2za8C?UG8bTpl-eBTvf%4y*g-QS;}5WOviJ6h5+8Lg;WH@tDS$f;bF3q+Zo zk$Gs3$)q!;vX7K_$864{vQ{IpJ#KsNOgR^iP7KEw&z^{JwRa#}-=};CO(gt=_u8*9 zpjnFMXOW*uaa&dRO4v0goG;s14Fxf+II|oZLo$0NF`~B@`=zxqYsT#2CAX`Yy)fw( zS9k2CRT=l%D1Qy|M)G>Z-H8EaTdwdr!v}6P#}wV6im3ZdUsirUvoX)U?fr@FTc~Cb zwvY$0F^WqN$TDb~WzrQ?a~E}SU=RJ9Hj6|qNlu9KEq`V^V;y7V_3@ubQ_}fr`G{3D7IBDA@9NcjXx01c_E)Hd52&Ulup`b z&Cbnkn7kpzp*`pd_oQjx{v#KuJA1|+EYcM;?^OLff35C2NS{*-4Bny zn~D50JVNa#aH;tc_St$@--C0v?pV_D3thb~vSXmxTQpsnZsSt(iLMJ}#LsdbV}~y- z2zcAN=iD2;{5}$FPQMggohjr!{Od~W0>P4UY4E+<6V$k;FtZ|I9)D{wD3xXt2w5~S zD9EGTc~602FZ&!EHKV^Sy&+!ZZOlqlU8|U^^X*KyZz9+{4GKG=uCTuQ<2|R4SucBK z*GWd)tnxBQ(x6@vl$7m=Sw;=SZ7XWUzxY?{GHY$N*&qjx{D|lLS*wwRY~eiZ?oxCq z@}~HMt$v_~888>lh$F4G&h5Wgk9a~7IafX~?(L{mZ>zUu60M7| z4j24f?4X=upS2!uy=+qA*~-x4nNnLr1Z^YBAn)X(EaSF(ls@A9gIGIbjkvX4dL}5 zEm_^qmsOyqa!kLBGLK-jmm6Hr$ z@Q)m4xd4J(4PN)tyH_2C4`su<1umi;502k_;CiueXvjD3H7BZ}b!%-}>*z`C51{FZ z{uK?mkxq$qtsXb7>*$swpV znKUo*q=cE-feOX>dMQhxN*(9@SNjLFhyJFz%2}$-)uUw#i}ux{7-S!z9iLbk!elaA z7&Tojj5`c87hgOxSvlofkw!ZkX=X(JCv4A^kS&;LQ@RsvF9Y#JL&RB$xG4j@`=K&S z(+Z~FGZwPfAJnY}sZ$Cccu8-8l@uLV_;X!!3e{35T{&;rSo)O_G(^0bcCjkwh|=JD zGtWFKMh;|qA-VVr^15WIZe(*`B(cjh^g4oRw%Qe<6v?&Kn*nKElll<$nLskJJ3ABqZc zF3+SLx_;dvA>Cxs^41>U*=0e5$*!9|OGx(+T6k+8ljIkyn6d4sp$8y81Ag z*w8cg$_Z^SS$46oWn@Er&%BAV>p7M?bWLPiQp~w|_4CZ%Rn=9=`_k#U2uqtKBbu;N z8yXeGc~p(Eb;MkZZlp0VGJD~E&JwleJB&rUC zMP8>b_!gGz5|)6=@_Yt-KAihmV_7EX)COM0b81Svari5l-Y=L{$nHrBGsmbVLx5q7 z#^{$~GN)FAC@034MoYf7H(Xw5h^R)Me?7_VtqD8oVbI2U_Q~rD^_LNeq}z&P z?$kkoD9-coJGP{0v{3t(hF^nx(ZO2N@`!>dq;PbgR`W)=w`ctwR6Ijw3liLxt5!tixlq1j z^n7;(*c!Nk5csH`$(VJ8tb@IUahcK5*jNXV?$}u%M--+;Cq(lH@&&~c-W#YwCE1I^ z6MqDggibsBi>~MHMFamhMi%5BSmbP4)-nkiRq^5^yBS{~d-Jp~*fHbwQ|M!w>>Lo4 zC@CSWr#50ZDUa#fhbF~04u&qKKXuO5WE43t?VKK~?C*d2`MmIwHD&hi%=AYxf~n6i z%|7cCQdB%;s4;-D6-i?qZrr^x%bh@oh?wp4J*OHjEVfI}-ZWys6;;#>2YIr7gi=Bp zO#kBTe95V=c_8u-na6bl?9^V7$_O?CSO%B)*>*pGn7{&fpF0T-ueZ3PG5|<2)LOsy z)Epve8EL?G;?w{&TKE&w8&cN*q~G86{@Y)X*7&;LjM?A7s}j2aV!iqM;1TN=9HgUU zAT(3Tx(c#_>>GHnF6E3>RC@`q;5B0FO;20f7K!FN@uzBfB4==fjZ-g%ksJwrv)_4N zsqlLK1S)K@_oPndq-G;LGsia|9Y-pj2UjAm1kpDhj;_84W!%LuTl}KM$NTrsa=ip~l{_)HhUrkmw(hCrHmYfQ01rUalt_M23a!jP2XB>_a9x zj=m%09qn6|HzFuO5-2*7$rq}_FWJq!wW2KfOxwpayM*kh=WT@(Kf8`DbkDo(5MN|A z#~n{lPz_t=cZvRfV1T$Pf7Ep2wKWGdeGJHrWkAEJi&gsY4N)^1I*FE44YiMa%V9T5 zc9%e=TnFVY6&?0Yr0c6rK*_`jZ`=wo_RwgUjTdsWY=}najv#Fd0aECgl=&^AuwEg% zxM+?<8g~Yg*Sd`6b+~V+vzoOsrNfl8sv@3!6e6iMuaNKU#$Joe4X$Ri8~|FG{c$T7 z`R!~p@<8h0PX2B`P+)!IaeXYI_~YA1{-!BkbbFc?9(VS)Z*>;@pc47fc;zWTd}G2E zz^wWCer^=#916|$zv|%`(#`;vZ|-I}(DT9~yTkAT+_zOojNByKn4w4WdlybSl}o*6 z!ZPfLl&9^s^8b3SdoK23!mV_L|5P!R+#VG@o?>T$N^-pSD4*=VwhsK^1Qp&Wfrg3F ziNpGAY7ljeO*tR^+~VQt%WGhr?72iiO+6e&Zft+@J?vvSX`Gam0o2%*)9lpgnpG+b z@I5N{BywHGZmw94Fq+IzJ!8l2N(ZA?-SuLg>iw9o=jhW4a+Le^GKA&*7$+_xOYRkv zkg#Yi7#FKF;2<`Nvmj0qUZQA$GYwJ_6HJ(GMfpE?Ph5339##G4Hof5Z9hW 
z=#P>RS5!P+whem6!F{Ww)&qcllDbTRp1J)jHqq2Uqu+cTzW)n+$q^h7`fGd$Eg+M_X@T^5>05N$U@Wx4g{X89%yoB4z3 z4iC?B9{qj#T-zr12~eB=HU!Q^tgja{5Z~_SKYm)oGk=eic=r>*F`G=;fEb36ym)np z&$U3q^jhB=k3$McAr1Se6&}eb!_M_7dUKK#DT(BM;Q7+=`tdQW-E{(lu}_Ok(CI9g z%vswf2agyNh=e>H6h7JgRF>xWcHyV=u2zMFn|Xz^zo97kQm-A+M(%#M>SdB`=l;V> zF)U}2ZUi@u6pxj3;^j9A@+UOrP-J1eOm7E97xDUnVdm_)V|C4%cKzbbn?)T!-eqXe z&U8o6JDQu7;`TCVzTxndq-T>qL{0Fv!UctOGSeS`!n9Z3^9e`vopKsp9=4pc4)O0n znNZ8UyNyw3|4_pl`jd{P^Up&TYsfP7S~3CklifcOGT4tNzFk;)OrJ);0=ihF`i|3w zJ2<&GGaGN`?DbU`{hLM?^0>?2G^ZRgy&^r)6X_I&|LKmn<$ka@YVrTh_%N6ME8`=p ze>n7C8Q+ldnC9RlOnR?6kqp@C& zX`dgO6%=%#Q_<)8HhTOLjJ@3OhoR@2t5+Xbt~_?Mt&54hh)JT%2jg~x8aC*4-S!lu z(>H-rHI(S*(5}mf`2_$M&HD-}!!C`VeDGE-^1y9tsDCJMH@$PdEV1F`%lpdbdscMX zver-)?Sur6VinK{sDW6_NK;E8+$qDXKF3P&{m`FL;4^^aoh6&v?}h+S>)BwaW2>)s z+`Y&7z+Ljq_vTE)kYJle%fEajZ5Nu#fcoZWaNwnBnQ zvCgmNCVQB>+KcB2Ymvz>vvTZWRGSsSwH7AbEsY>7_c8f^Ijia=o^x6o)LmHq>$g%$Lr=;0?&+dww>0iv1Yb@r#}n) z;3D`qzkUh^NM232j2DV$V=!NHdJNOuJcdF}-EDJ%{ta3sfDtKBnD+5)2J7 zRv{Lr8FzmWx`?jY+4Vf>ovR-OQ2BJ$tfsmETtxvFda>Qs&L9HJ~#B3-uB#A8nS?H$oz3IG6c)8pUJxO zLTh#&y`I`K)tad(34jewifpY2P4AGXqfjY*8`ReaFta36MddY2t5dA+WV;8+lb-`- zuI{2CL1saxFky^!342u zqJu)FN(U4nmlTT}McjNwD?wt?LRy7U?S&T3G3EF+c9LUZg57GB^C)`FIpkrxB4z8G|yw*Vu8dep7WQYF8p<%-XDQa>g{jnQo=uIIdwXL19m2YvSNS)*3P`Mr+{bE z%b7JIS5ZL`Tdg__mA%(kJPV6jw)Cnx*teP8!|4UrX7{FObVB*n%kNYqu(lPbGpXyi4$G+Fpy zblU%8p?L=9?`VZ*6h1RLI*KKE(#O7IsmM8_tmmx_hcvzru4mX~*x)s0P3_od!sV#Y z($Rx8D7kIeV3-?kTeP!3F44z5(8jtcVMmej?vtm#x5R{B3|=qgw0igslenHO7GNm^ zv^S=389!U&HllfL*hcv6+n$sS9uK-jk%2}ZC0=RAZ(ts7?5C%kF10#oR~JOs)w6$p zULNrG!%eCmw8*VNPP`m(2ZCRfO9<`cOMMHtrgdkf`5i03P>iL1z66z$h)k&|i&pl`P2P=|f+A#wRFdB%v>=>3J z(_OGt2|Vt`Y$ICr(YJjF7d>_NfEONawBm46HKoUG9yK`Yo!#E=oT&RIVE<@J(Y}$% zHA*(7hYq(*8o-}EK)MQxV9EG2yC;ojjg%*qzjoA-bRXJ9yT*oJJs#$2U9@@Gey$;4 z+`(jVpWwnfhvy>h@2X?gq*aO^`tPD-Qx=lujjZM2*n{WLvuW+5h$V6}Ah+_<14k8Q8b< zK~rNSx=C4mPhp1^8>Z0=LzH()e4*g5?u+6hV?9!%3xy%^58hhK56ZH=6K0Fwp7*?C zM)-g?Ac6m1O|b*9%9_ez;1u5@^EVcx-zL&~Kt?;wf{^9tW_p!kE&CK1@6C*?P0}Js z4Y(&N6DvNNNXsmuPinqa_o-SgcrYfi735KYg2#tvF~F|me-m*6ycfGqG>h8$+TLo8 ziN!I_fc2AVnx8@4<#P`s_D!G-PA;Ebcw9<|-1SpKZNDTf_!K;R{o?}PV8#Hi3rb7q?@FNX9%nKg^6w)e+b`gpqg#?D$0 z*pp9FhD!Hl&h*YrHTn6L_M|`!cCDnWAg3HrLk>maxt3ZMxpBc`&2T5tm}tUizY@}{ zJMkP6iHCSH|;N@E<*t3)#(3$06pq~AAS04UH z=n=mkR9^M2zI`X52XJiT1;P|;Pa~`WFSb$AgRJ^u4~ji?Kk$Ly7wt@p$WpE3M%*Cn z==_85+HhIfSXgwy&-Gnuoc5~4y#kZBCC68M7&cwT9=B`$gFPAs;{FSJEc`e2;Hjl~ z_YtCr2IK$19;Ic0(+&1TnINL^-ZJ*6?grR{J7lXXAl8G}bvuce+Q+ksXw*RHH_Ln# z#646}3-`UqbQya1MHo%~VWYM0oJ4~Yb=23U;%C}<8wX>zYVVF$JLf`jcYG=&%UV&J zauoNB?$}q;>@Hb}7Jw3(YTRn74KH-CBrNR+|B%j+g+$`8;E{V+@0)46kSLZ%_rMbkY6*>;AOmt*v`JDtWm^$BtNTq-TQoY&mn zYyHv4`-YkC?{&Tdn^o3>f@#?|Wd(i?%;|T=D-7lZM`%8bK1HmkNg$qMgA|V72p9^3_8nxq$#$2s3(_!nv$3 z^@JjuFCf=~{{=uNwF?*JB(zCp04Cx$$CKAFN7s;+Y=N`|CBBw!-U<>SY7(pw$C$yIKWN7L3uuA#pw~KCUPJo^iIYP&sq^>XzGA z|Kl9A&f*oehh`3&cQR7&x;zcViQX-Uxqz7!q=xtin43QAFRXZo`gHhPe!N=aS9RBr zTNFsdVxr;=%MSQ2~cjf2wpW$m_ zfizTq`O7Db-iv3)w+KfAM075aAc3y$rs1b1jH8-=J!bUYdzh0U$#uuAqXxSs$9-%P zYq|gZ=UDjA$)@ev6A^}w{KIp7hKGc&OiREW1#;^oCe-r1!n2xs5!2wq-$+EA0!v%W z!=9XJgyo0|gZ19H#ov92jC`=WX5oON**)6s0>Ptc z@5Yn~9y2CU(>6!zA9#%Oclb{}r4`sX7|#2XQS-yy4dwD+#Wt`4!aS2M4etqcUo<5)@Z`D2 zz&@X2Vd;LnPHy$vnw|Il*B8jdKBg?k%;^KXZ&A5elQ{}#YtU*a6t7C1fl^7|G$$fk zXMN$}cEXKD5D5b5f}9*7V>^1dk{a(ZNHkRt8W7vUkYd_?;FtK2u^w%;bO=NM$tW$> z#D3QGS`WB|>y!_g-Q*jvze)Q4q4|TT*Q1CqU7`L!vAaK(zh*YxN9ONc4irS!fm>s+ z#bjW3#3I}YO!8kPp7ftExt@EZhOse@6mdfRt$Hed>K1PuJfGQ25`X1W@aVUID#bBT zZL{}De^nr+pgW8SflWyl#iYjqIE>m znJd2_X4su`As5R(80KHS{3TK=zAg7sbneo};!TqxL@pU_C66Xd^|X=4)Fs^OY;Ze( 
z2S0EUj-GF)A3&nZO(xomyl+P!0Uf{lPRkar6jeTq@L9CIay;YSw0C@d1Gh@(8*X%_ zaI!cdib9b1BtcW+o{=tpU}JBL%M`BoSB2sU9-Tbt3b7U*%)5aakua7l3mBwt#9l%A zah|&A_u(fnzbz8P8v%}UJ?(Imqy2KjX36UI{eTaTAWFpI*o6%JrEA~gE0}Dbx#D81 z55HDn0t{EPe<33s_ zTrSl^^aM=y+c~=aAX^!vx58yt>C9kThZ`SQI;Q&V{)Zq>{s|7-C< z1_m-`cWO$NFX;kf)Y%!3RB__@Z$1c(`9KhzJGp0Q$56DRJ215BCFu~IP|3ktQCr7d zfqOqKKr-QZa!|3qq|>C9pLVQ&N9K@aP-LOUiL*W1a_QrI4j|HzN45dG65wmC`0=&k zqS!MPWKCOFYx*EA8SwD2*ES4e;W#z9ebN*TycK3d$#nFXlCGMeay)f?-vnBfhy$&x zYcoIkmgb8OM1{)8$$Y&pxNQDv5Z)6CmllCAZ?bq^z*tH1geTI{qvon@G3iPE86?In74lP99MuPo{_T^wwcQT` z#zvfDU*-OiQVGY)@UXCjc>~*rel4f>3LE%sT3#Iti6IEiL$YKnl!Z7?xu)?&fPgjp z(XfTD-h8qu5GA!vi@yG76q=mp{wSSFK6EPz@BXRkZ7gv@?n0xZ6uE_BHOi5@D98g8 zHY*iUYt|6s(}S#=`@VdQi#i=vWKx?eO9e19t?Rj)I`f;O(ZCh}5M4a5CrwCMD5d^K_Mkqzj4WV!&Hn8y2(`xxkT3 z({>aAPws@!do&(*RKH6f5pq12<vzXKW_|6`_yjtRp*uiu&@ z0`Xm#W`cYZ{S~=heJ(}rPOWr7hs8=y5I!$#73yK#_O#6rhB)BSa5TxIz0)Uw6n_7R8gO+-{$ zH=Z`Ua^1F6cj=c2bE&0#XBfnOf0edv$|+P5FFgtWI^43<&a0=0DWm1G3^W5a?DYSV zyW})<-5LbmQ2EdNbD34t0@qxJvH>gW?&FuEE(~qv44P;SHb)bGgI&`(I72E)V7hCN zW-Ml;<>#fdEeU=+;Mp-Qu$16d2S(V&1bT>i^q8xtIUPhjY@*N9E94=lj#zE}_LI)v zjzk*DL{@mvX1lGp2riHApv?b*8_k_vzC2O2v0#am%nsx2r5Z0^o(MnYb(+y~%g>GS z0MNuuW3!LrO(X3)(>*fFug5t|9CByryI#DKU-E)`>K`}{oD5*7dVUZJnT4{29G9o} zX2wXFU&5EIWS3t~OdgBwAQ?o&FWp~Cy^Y9E4ZKCL!m2RJ1E;(c8;7XF(aqnL`HzGz zU2La>u3`PR_d-2$`NZkwYXK2um+OB{oU`>vQC-EXo#|d>32FJn*~BxW(mlMs?{6k9 zLCMXRC50eFCE7n3^hctZ0S zfz)Y$449fGy~6`*gL^>8=@LKj!rONkCW$9YBol_MSIB9!!{X;P z3cvG>X@UYijym3MU&IoL2+17VNWNaUsm{Q)iz7Psb8|(O-`nz5(SiPP1!hYXir?Ll z2VkQ@y=DhV6G7i{@frUH->4XU0w)Xnhtn1A<`hOS$Ebtk+D%v#V7t6=f?2(|X$_Rg}rh7Ab;{igq2OpL= zPgz5^KgFlhZE4yduJoK@ANmb##l=uWBCA0I5eM;ZvUanRh_I$5xC-@@1v8&r@pzYq z;M*l$wMdjwme1e79-*oI7z@lZWetz0*+!oadFfyIU46*Rs@F0tT9`e-9rcW|U9NG*&Alr~B{k84Z{1>4}fqnQfwYyyd2Bo9)EIBTM$c z{V&#YRM;u3pHG59d&US;=E;s8<_tDv6*Wj8p|r9e4O?-9hR$M#NQZgtj4$S?67Q@N z4TDx-;BC3jV&nGSl!okw0pYSvL0**Ad?zUlvmWBvhvC^~HCN@Qe5tCPfI|G#^b_f~ zj2ICk+f-{kdcUb5l3O8yj3W0aJdy5>jCE;ZW5BaDL~r#AOjp7GV0dgGch;!A zhGklLQGfD>#m4-C$0d|{V1uHD=CqNG_`y-mhv2d&HDNq`l=(2ibStmV+Cu5+4o_tE zOif!7IF$t!e%00qha-0dO}_goEnd~^Q{yE#X<7xav?OkR%@*n@0@F^x zL&ZRdlzS&wY;qtFt+-KKJTW^tpXEQzMTpITkGpHOGfwAk zAQVd9HKcuM1b4~~t&Bu?ow6hFPYQDFFemUBFr6{Vd6pmFwz_K0F|v|+gi2}40x7hk zKXA3wK9LqWS31^_Bn=^1A<~u=`|2LXusW1CJUl5qJA}Fe_F6$+N-4Z+2#IF zQLjE2rutO@PN-Q4{{~NMqD4Fp>PL=U7Jbijk&Qe)BY03c{}1YOKRoU9Q!a<3)ogO_ z#gD^QHBldTr+{TzJQ2S1v#U?Ddsn1R=zAqA&b*BP5Too4DnTsi>u5)TYcxJP$QT|J zURxlfVVm;EPy6Evw;kg)V9$`l$My0x77S$|9moBoo&hHT4yQpNlJTc&ddgbl`43-Hf`m|7`v=JIcMaIQK(fL|#MQ znj&*f{S8-N#l%*>9|JN7Ql}o7H5#077r1P*+6sC!X_sFcw)02E4wtcj5T@U4*fCCG zk+}QS^M^A<8#(Z$tcU2OM4}EQM0JL@bvUI>&)VSJR!(@p{L+8-RwK+plD?`dl`k#ZMv1{2azU+}cPyXYs1lyAxM1Wt8X={__hP^S9sdf3!g4Zr~%d zQy6x2?kWBxMz4Zj%0NTu$PI&cpGBe4Y{cb7K(car3mdIFr)qks_Vsjyzo zkAIfgqS;8WKNDv(e%iE6mkhCCCg{mB-Qdnwqsj$~23Ky}NuWT= zl9F&{oKFiG0275?(-cDrSQ}pAKgL)$c&gX{d2RKM29S3fKwB}K_Y5*=UP0N-a6m7; z54$Wh9wvHo(B;;_MI~b(91r}CLhK_r?|1Z?2it866`KIDF+koma`z_XcN7i%#O~G+hK1=_3CZ&Sw#fYuLgB* z;iKDj8GUo{drFK%jjsw%KAs~1~5-yO?ky7hl4==Pri7A77s z+UoCKW#6ZQJ{1sbsc9fivZs}ijpVt#a_hhi))T~5@k~n`1=sVSC-VN-*!oZ1%Vs5$ zVZ)6>tcPD?gec|N5?OZ45E`>5GRzhiN~Z`as0B!gSCBvfAsS9$d#a%pt&OqvK55N+ zMH58fu|Dg{)%(1S1KiL5ptb$_=QnSN@`A343r$V=k$2vL2E!I;>MCgso1y}>IM6gD zK(^uKkM>HS^hqZ~?e!VEeZ*|>gr}&15%KxQCk@;_npRXiC3-{q%CYQ!9%k5dw@}8w zZhH}VLV}g#WPoa3InqvoJgcmBAdVw#A=et+C@i0m88&)X2ZvTnU%6e{4#bKxwy%Rm zA`I+e91Sn|G{o<+d^!lcJ>9h0*~s&?y$+pQ_w8M7D@dTP$5|~*xho@+v=BpW4D0vfDz?b-;};xYikN?mZcp2ZUPc(t;-eBoxD2|^ zYen$gF01y&)UJTKRM|z9;`lTaWKh<~f`o3Im1!%@3 zfw5D|S)dJLj09jBDZv#0tC=dko7xB(b!XAysh%_@uH=>kIEk)TP)eQnsl+Xs&`O$p 
z@2SPMrG`S&C&;>a&w`)?3r~%(y#4K_O{{Q((c3Dd$`=-2zgPYSK8kmq*C1}Z)DLk} zgh(i(X)>HL4a1>(4G#_g*E|M!o#T4B8}=Hn*X;SAfU>rRAyUS-U-7x-XgP(`0yaf$ zjr{DnW~VsdR(KrQ9;Cv_>d3RFWY|<&99K4gHW?Rcu^>&Gk!pP~GvPA=M){!nWk;q0 z^j`lD|MY+S%m3d$KMX~&#>x+aEV2s>Jl0pV1^As6TdANovN=7_3G!Ik6U`g-wHaM6x1SKq4STzY*>U| zmHT5IOg1ZJ-iGnb^e}!UAQc8rkviw~(cv=#3P_Yl65vPi8b2b@)pnU>@cWF9v5>(r zczRJ6z0!a1G4jJtcD*Zmu}0AQ^idcLw=y5)F`%hG$sGJueiS2%w-p7Va4PFCY>+3O zzh1Nfcn9&eN04!ju7>TDZ)PCPW@k_ftAkPjh|&W4e?%+GsaC=>{da4*9De^ttK9^f zY+cQU!6?#g+*oKEA3oEA?ogi#hwUV+4*u9SOyeuqVtxl}D`0wQxTTAn8nhB!gi8eW zdD(H$+K8oJ`st>TLE3DDkTe`n&~}3A|4uACJUlMOw=;C9PUgJk!DgGwcf*&)d=|@r zcjNo3#NFt&c9Is+9CZ+owdtm)*2wR=N^nq z{Lp?r>79o5?=>p1-uOTG8eky`=h1D$EriS~h8<8ERJ$mUgDQc1meDq8^ce7P=vK7v zVSZZkLA94Yx%nNS;#n?t^%eGg^Izm6mh-V)S@<>IF9p7>E}bljs`1^kGemgAiup{rP&eJXr@yshpH)EXl4 za6kIz=s4;H-qA-#v(UTK`NXWq;%V~WX?`teqFh8eW!cIS7H{ozV|fQ|Oepe5dj{Nt zE!zA$Gacirrtx~b_2E3@KE;-(sR^%VjjT)x5kKvpK#e@y&o1aQ5uj4^|bJLXa?2~!V9I%@-{^Db$={moiaLOowW33^ZbzP8|de^hm4a&VVn$4~&$EB`)?NNFbvxR7+d`g-sVZv@q)ZA5C!a^UBMzW58H)0F&6W)W9 zZpwzyMlht=z!2!{HqMIPmNDaESVGh;5PtPU?BRN-&pPU8)P9fJp>s`r)tKpw#!pdm z&935f8^-iFsKFI_>atiE2uSfFib79SVtB2ZXs%cb&$kFw6;5cuJ>OZL6&%QF!|8qi zk^o7MA@!bRWQ+fB3N5e%YV|~dhJanftHz-EvNszEK`*9DIl1p9qvpr#Fb#K5DWb}1 zph7l9cgcUCzXa@x+f#T@Z42z}lId$Z6UBIZ7=16AAPQ7>?TZDG2(Q5t<bEvAusk~XzQfR>`c3#H`&5py_`&$$OLy@=DhV`WhQ7)?XzCx&kyGPm5K1S0BnQ*z z6tlje{i4jYa`Z$jI!(>lN+!|f`F)zp>o~?rYH)ikdXwVlR*aqvq-Griz{;*|ER`=p zZSd>Jk@>H3E{i=bo*V?_>liRgNR(>;H#mPFNhRATNmEH9LTpTE1pUNUqYv1z5C_T1 z48*(@GL02pt#npCC_@xWG`?f?WUsAz# zbXBgVuvX>}H`4_W4hqF-0BjJ1R)ZlArHz$}Z5RR$wL#Q(PSM5=fyWoYQKvme^Tf3& zg9Dy`NL2X>u|toMJFBJK*DqW)_Lr@9F6?>}8OJm{lau5%fr zd+}F?3DszN7bX16E5wQ^t^Lo6^vJ5z3&Tgz6p?TfNH?g-?Qg9xy-8c*U$@z4^FfsQ zz76*?;67hAZ zc$Z!PHMkM+c8#~^N7&;Qkrd91VP5<&(Wfx}qLbh`s8+Uu5Vt^QahgI`D0>W6GZ}jW z!Kb4vTJgZZHq>)G0K`L|1c|eL%F_90j#H$EGBq|#VSX=MxZ2U=AEAFZZv*TI7T%h- zFp9igXt=rlfQH{dfbki`wt zRut0+gLG58KHBeUM^}Si+MhPskxB~yhm6j2kbUyp$*_Fz(T9&y^G$wwKi5>VpqW|( z(!oI40aV1oI!edHol#Fwb3=36yll2+Xx1s*^-<<9q+x(6j^K+az2*@*lLPbwoW*-zH1q3WL~;8rsRFb8Z1fFQoJ$jf9-iGZLK?Xabc_g)R3QDqpz}aB1;AAW^%;N2&X(5Z*U%oEPCoTQc+Z|p;9U5-6 zm$y9ZXecu5RqV^R%_;j=C62U-G<5@fx&VoZNG(v3&;+^=Xf6&-0$C7iMybYA6N?$d zt%`GbVxJ^O!?N~G21H#pIvk0t8FYTG|5v2oP zv32DS&(7pd(xxnEOO5d$uxyF}+-k}dhCQ+fWwK)o66}J4hzd;OVyN5=ggwWnD?+QO zAIWJ<{WxdXYTO$wDer>0q#SW3%B#e`wB9zAKyw1`+M7@o#b0{1baX2>h_fhV`r` zE9kpHK!%BmKZlW(RcH6INu7)NfjT+p2T zGfwNM!!z9HY31)c6KsUMngSGx_y)o=!lD!?whyqS4NWSp>`-UXzjoje%3L!mxiYZbHQ>O z`b$U$3gc(tRe3;g+NuL0VIHAr_;$*eIvH#-g3d`Z@C`^AOHW?FI7Y?#vDz+?)Czh zmi{1YB46jjo}}?C(YJL64ENIxpk4AEG_NDSefSjM{NT-tLceYKiI06~*t22DqErY{ z$U7DK1=#8e#FR1t=*&i>pP{Q)QbxZX;c6k@o9~J<#k)2a?R=MX9>g`C_uHClcZ#*?=Hv6Rab|uS)`x7@KFI2xM|Td- z6~p6V%G^2m@lmR~h#>gD+eT=G^wAuF|4_~pMQfZ|;}P0wJ1hS1bL`WfhL2^PPF)PJ zHx83LZ@m7dS{`t$du3lG{Z_x{x;D)<60bpqX zmfFW0P_3ipACuuF&Bq0Ss4=41$~4cS=lZ^M@*acJ zPh?cL&(Yg^(g}4=co5-*+V`A#d;`B8UA6SF-Nra!Rv94|2nX$27@#M(h7?9gTQYCQ zC!2ovn!cHeC#Rf$G{m;3+mkfqYWybr3g?d`Cuis3-D|T9N{eR=8vGjy_kN=MJeBkI zF79_y6l3K0XIVO8n9L)B?6;B{#WzY8rv2g5G^B~`4)?LMGg?=EPqRyULP12cM19;# z`TS%X2dV2RLxM@l%OSz(<|tb#t#4Fp zg8QAd&>GAsU_a+6GlQ;8&A(h;_xSOUSq3nO%{kdz6j?31Vks-G2$&f&OE{k{?!l&Qs_wN(^oS$fKbwSDbqo*dBGU64;Xv1gv%24(Lr#%f*S; zL5%FJzTIh$^Iv^#Q}xb2AlNWxJ=;}rXng$T^j8N^8kc}iEr#ArVlOIm`A1<-F#6Jb zM&3v>MiRusZNkhTlUk%BmBm6YSY&zNMCXGx!>*ZClbs!=D~9H)F#M|_-d32-y*sNr z9iMrQ7il4Qrzn^E(S89AGzM}R7si1bwkP|iE}0MN1M0I@cL^n9|McQ7z7gLTzX!Ic)x5V zsG;dReeeo)Py|Tu+y{8UN}&9o11PTrwEPj$+^pOMxLyP~Amy!4hADoa;1nExgSdfq z{PF8~!+_VX3Lq9r@P%)%?xLaI1xE+03lqpl&y6_J+G&Hm^ifD)g}oGhiROR9hkH@g 
ze^u@*E&A_NZRLydpbL~GJMvXB8m3tBugXg$9`54T+bNA%&sYj;y2`&Q@90m5GF4y} z8X_Dmz_N#@d@v)D3iO%es@OL_*A23iW?@jXYT+t4`8JCh%659AK7S9 z=eIu&&^ql=5*VTFzNALB0N_HEz6LOu>d6eA7WNw3-vnZRv)cWcf&`%++R)ifG=T$$ z?py{3ZD!CK;9ptHXe-*?zYw_US;V9@EKG|H|LJL6zQ<9MLmpNwcLyj@R$G_;7g3H> zC`2vz3d3r!nuWw1*&s2F13tE3w9BG10n;vkJ zI$J$Af#q|?G&v*;+j5Dx3m!Y0ioLp7^^@x$sD|tD!1vf3(;y(wlxs^jUiM>-?9MP| zMFp%gv1evLX2g1$_f>xo81SnoOR~ z(87lMe)Ep*@GQ8g4BifU|2xtFgUqgzXn*xcq_OOImxTBzj$Ncs9#rFM7<-*QlHfE%~K zw1wy?CVZbSGOQ);1tLjMj%a#fJIUzLb@xK{E_>>>gC(GG5Up2cWuKtl^$|VJ>kwcX z>hLFmE}@Nr#-3-wWWU2DlTfSQTii&sk@=A~5;-U(z0m7up=cj;myvxk5chQxuVbgG z{4BDlFc3@)-2}(`M&&L3c~&ZKCxG3%ux=*X{27@68Pj6qfmE`jGbN6tO0#04I#Zw+ zu|_u8<-fZM@aVRZ>Be}$AB~7hT_xJ&&JbLQQobso)wNtO@*FVnku@oEzTmHc7VSa} z9W2}Wj%;|p*nkxbu$qb@Y>;k4v+d{Up^~71cY~Jf2|ci-=ujNpamfnz${9nW9UQeA z1@~qRx!jB4M)yxRZ<}6>G=v@GSHke{@p(P47|u0YiiD^cuRuVTPHu_EYXgiIFM8nUx$2tf(vcd+eT{u4(e6(t zfe@QM%&#W^1LiSsz;ExF>k;8^fTHuH{2)q^)uVCoVz46`sb^UydWp$bzwlR6yCh4q z{=?Y{e`RpJ4? zN^cvc-y_2-_Io}9IKr4Ox&$89w&e%>+AyX!?wMvthOa}d4dBm);Df(*5d;|+tm~1o zP*2W07djt}J+GJ3#|gE~E9Aad7im4VITEZ7o&nY;R3VxH@pbt)E|b0yRh5_vSQ`}I z!>{{GwIkHYzzK@x`T%9P$XEX3d|6piHlCP!Y8x~7FCT;@kL*Hn@u-EXVpALeu+qT` zu0|g*VOMAcQ;U0zHw^y5-w(f<{;H(?5j9lqfb9rw(?R#3DPJ!1LE`Af%yHY>|ElQe zM3HYok6e>DiF6#gb&>@aNQ4UQ;VGz{tU(J!zpGO0d92K<;{W`VjIALd76DJ1`=Wp| zBv%G{J>hV&Sb1|$af~}wRw&WHwoUVNQS$oU!QGCAl0XZQ&AU=HSbZtG_rDEoeY#8? zl)1p%lQs5Pc12l3aP+D2r7}UPnisMt3QC)jCj_YX(hip?t_%?M$+P|qmv>mp{|NWwo4RD7NuH?VS{_2>e+P4hLgvTI9O3ORw&&8|-)d4k5Ko?o|F2H~ZwD zmFw#R@Yz*XrweOrwW`E>$+c&`62Md42g@p}p_ZNepPeWNxpqdNQdmVL$pW-)~PxacycH9=5 zm`YeIq$8^`fkR(Y2oe){D>J;8sV@M;bpjYsk1*6Mo4MQ%AFdSJoKy z4R;!;r%9_z)(q>-7{(|cQf7{3s$l+i5r$&Z^ZVtBZ5L@oxQtI-kwnn%!O#5_6oUVD zLRr?;p6{Ku@khIqroPQN%*k#xi?!Rvj!uEn0F-P0;KGh)u|0`ensolvhq7$RFE!Wi`J)9gPvvzsc|_ocE(XIavH1-f z{BL~y<|qP4#3k2XU>Rh^i}0iJj-u_&$hsyZqEoc0>6gvJyB#$VyLO{$X69~e)6A9M zVD4{mEuU$Pf=TZmB5$68k*Un4hVy5~l$HPNq zRcS}i-B9TIBxucHvCq))`+zH>Cr*`v!-fD8g39WF4}Pu_Me|xOzO!{9E`t5b`>}EY*d87q1pFxS`8X1 z#CW}Ie2aZ+`@JCl=l2_`=^?f0svi=Ar=S*!w&FC*=p<{?HlySkS~R>0I1WcXgElNM zRH&X_%R|L=k-yB?jtyNE;nRH@k6~8uWLD+)tkF|mMQPaB)7v+S*Ht`dk6&3QJX^(c zRiE1?$N4Mv^HGaPxe@l_l5o|6r)hb6pr>dz@FH&AN#8g_(%)Lk=@Wxkyr}~qH^QI^ znY$8_o%>hOp2vOnku|=4W1}_qET!V?JrFR};R4=V)M8SV!ce{*18|q~7u}LG937`Q zKgv3aI#l?fTfSUoKd!EJJS)`4_9!fij0-QvZ7l0e&01Bj*IpX2V=2Hq;PjIZd>}$_ z?toMR_c4f(#-U}<8WKQ({J2x|DB^<`llQMotLdm14M0n^*C}W6SqLP zcJYG_dJ0O(PfMnKcNrE_G@Xr{ajB)S)2jmW)bp>mrtTCoToZRsKi?FV_49gtoFP&! 
z#Y;#UShtb6RJ9y?4(y2;+0B8KA26LkE7Ap=MmS&dg7WT{?-VEFgQF5lB9`d&h^q_w zbAEa9txxWJeY3o_tnnk&RfKgp@s&qw+xJch$4DJ?3+0^!=GYo=3V15-(+tH@3s)q% zf+8oNEDtomP^u2-JXzzj?k2{7`~hX!cM5z4X=IbX;wLw_3o~pl-5Mj07W&U0y;Gj5 zqJ>*QZ)etZwz9OTdhJY2fDC&^!6Yt|XE27|=n~~+(A3~<%N&n`@JvsKE!Uf9#fEp( zi{{r%k1?)A(awY$Z2$FH$^w$$nGEP>Msf(l}r63!m*=^9YaMxBn8rjMy*Jz z$3EB@CC*ECmC{W5qa1%(df8YNZ_CpmX%hD+PUS~tuZSHn8%pJ@%hGkN3@_^lcprJ6 zqq10N4xZO2RWUb{?(l)UnrQfezsd39-Ra5o2)859{F$jfeH6 z_+OPsMmpJ$Xoq%*)YBmlZw5@L(`dVJS!T89G!lOVwG4-$j%tunKN@!F1bXUo3uNRN zE(tG1;;P>iZH50Ha$H|>ztQCOoBYaYAF;7U76t6c>pP`Kg!MVgTizfm*~h1;uT$z5 zbDw5_ug^LdC30&%n*UomVf#CC7x6v4&USLM7i>_ zw1a2}ve+V2vnGwi%NkL>h`BzHYBdQKcrqFcUa}E2pK3#*8)CB$&9?jScB8!_lIdZ} zRMsyxJqCr_o?U6KonA@IHmFZ1urH8RD66o|^zJ48HR~QVtc}TGNrTlht)-{1e^q>y z1+mOz``o;#(b(C70BWpIpWjZn&eW_C^9N78{m6a`l7*H%@iGeB)A^LJ{0!5#qV;X|q@+0ap3#zL zOO)18j7SZF3=f9emXuIa1#Ni!SX|oPnW9yZ6_$#i{Lr?*<9!>eKBi{HSG#PzbprR$ zLd-N$>{b>xJIf)(N|bXSeYN<5@-}7!bP#OyG+NYDH>uO;?R`KAcnh3%NuX;lQhAKv z1(lc51WOal?O zJ^EKNc?;BnCt3t50B%dPYnbEfPL6Ds8w=ir`Co*0qR6 zh+C;08)7M%Ggquq5>0C=9){-$Jo*l&ec49dN;{~$4T&6-Y#?1?f!D(HdgfSYC?gqS zL#<@ghY4pXn=sIPLc=V*raDRi(o|QN@!^|(pynnmX9Fp|C71&ctW!vlgY-xIe!2T$ z?D?mKJ-99h#&~iY{!yq@3w!`RtDvH%lX1Oy%T$vuO-)~DWZCQ?!DV7j2>1E|7(YYowUav$!>qXb zXh?-DmkIlj7)HmhA~~H*UDCOpb*M9l@GO`6f(P=T@}Xlu2{w2Fee~HGnP)i0z)h_~ z#9c}z%>j(h)&(jeVXKsTiYU}i+@lwy_`XS5#F?eqNCOVPZgvt|`Uv8mo4+=Cuh82s z50%vnlEUZFK;{LkPszjd>2H|uHK>*QxtWQ2<3Z;+bXm;3+$ho@D9r0FGsl@@LYU1L z*K#O* zVGfY@cBU!zinYv-%Uv@aL*P&WwjE7c$`c6HXL*~4W1YxzOn>@IKu#FG4n@q5B}mF! zDJlFF*0+^Rx~d249T+OhL2w-!Yybp|rule~_(e|PvpWU7Q~2Ztz9vUg-hHW;ZazXy z=*SPvbi62XvN!~PnhqlLDqO{h6z`{K!rjkAfajAgD}vL+IH~gktQ%@@;mU1*Sw4s6 zRZd-$CBa1*a3Sp^5+LAf--mH2fXf2HxaOWBjbQSW?D>qfk@~O7GvaEPQ&v$8On%;X zxPheM!f#0%Q6tjYh_?m=Fli5Ejh@2)27>W^#$|a=I`WYazVr-&U+!c62xFCH!a}cS zzbMRdL9hbEs!Qy}q5^f+&CGqR&Jyja*7!+KsCbg=TQuN!OTM?3)= z;KQe~1E%7oNBNIJ1^g61xj2XH6Hc+xh<48BK-$AYcyX@uA_B;do|%r3q5xbPzkVyV zQdE8;mYj{(x9$?IJnJTYY95FfwhC8+IAD*d_CZAqPS!m;4W1?j6ot>uXLKkbz)krR z+~WU-vo`^Uy8YY6wJMVAA?8lmx5}2KnKp!wJ?q>m*&36Ok!hv~+3&1nn`GY_%OJwU zSc>dhMj1nrEi+=-{W|Hzjy06Br(3TTytHY&v~Ao^L%IsHswxA6mCD} z7>FxzhVZ_vQ6lM(QPv9`=c#bdNw@yo$RUBHzR8xEzPsQ zcS6dYQAYQhk-=Ikrg7@9vpB2}m@*MXU(7V&yfotVq*csIbo5J8GdtmrDFo~)7|igZ zt&>8@LiC6Aja}HO6=OPY7m#8a=(TIcOXWSAKd8MfvZD1p4@PT@EvEGV_{pig&aZh_ zQ)_P|_uw$ybhamMbMPp)3tsOA-&2op0%~e8U^MQCOrY$OU}VB&nc<8(!{`gt!AKw( z*(|f0!?#c451|8x2!i6MNb?CJvx?6s$~%l|grx##th8F2M^5C;?kVK_gPkpg@^b1h zC;O8Ixi`f!p6$n1u~{sAY`YsfjAf2`JFA;aO`fv5KqE#% zkAp24M)bxrm14TBW7{-2`3v$;o--)yn0@#UW%>&@~DH>g%?Qx~;TU^DmY&_87qabc-r5#)2 z-Q`H%?vS=ll+~n9Mz96u@c?fS+Qi4_9im@EUuFgtvyGT6Fr%LU7uCtN=;Z!ID*7?ae+_Gv5+W*W-}RmY6y2?5ixSN`N}(#ks7j&Yf`7+t9| z7y(Se^N0T$ew}&|s3%#T`(B75WHQ9lG+NMOCh@z^)-@y+V39zi#K0{FD<>t^H>s=sru=w>q-!URXZK7!T z5wtZP723{~V@C4EYZ@6I0{JIp|Gkic@8y1iHEus5dLcn7<}4+;?HJpPQw#%hzzL30 z8D+E>z;;UCEcAm3wV1y2l{n~G&b~m;sGn1}yG(<3SU+)*Ez%a3TI^hZ`rt=$e%HMK zgtkxpnt8{z^`kxOeya2x7}<`?Z(90lhKy}jCwf)wCeMcb<0}i)1rezhf%s3|n6Cze zJSk2}J%V@b<=+EYjumCjonLzdw#Z6(rr>zH1lB&dJ4=$3nsu%kwiCwTwcoYFF~*$# zH{ZnmU*E(*ltLI}i$|Iz)`KpYDIc@iH|KA7?be%NBT2BVwr!!)}$u)T^z#d!?DjMyI8!&Xe|~P#~}(}{riYdF+9k9Y!l`_pi2^YGpg2R zfa${>BltrOgK4chQ5LW4LoN_#x%;j!{BN*ON~2-6@me2=(@#FaIzS4nrcn54DaCXVLk-; z*WBZYKa6xhm?ciuD9{LVkZFZ>@)7{u_LCM>nUXjl5>K^$9HOq(uH_q=6|%MXTo9Ni z@H;380|ErXxK!`Ff2-jQxZbmsAh+|<&kl`Db9+n<$|uOw8Y%YP-j}){_Xr06j(-nf z`e2#raLx%s#yP-*B{O4w^SoS6{+Is?Zc+OGaEqSsLGmD%f6we22rz5`_sNodlHL=o z#*3?50>tv{%Qh*xY1iISDraqBG8zP`U78i_fFDBPuSI{k<&W40B=xL*lfN@2oP^!X zjtM_(XUSncftMAFNRD>G3AY`FqmsI@0;p^U8ZUSSqTxr_`r)(41js&&A_~>nc9Q8y 
z52Z3Httw_I(mtBn3g5c#H;>ly>#v18++#nXZLuO_{&mlC-2dpJ@_+Jgb}-EhwhI*Q zZ?MvA&A(WRQGe!5s==y;CF~e?_57d`nWHQT6gbYQyh((9L#D!*Mw=k|2GhFC{ifCj zPbzk1HO9K}45n*x9?2FPj%imnWgG!dP^9Z`o>#75zNmgZlCU11)!t_c(&C8qxdngN z)?*q!^4+u<9*{9m^iIz0o8Zt~OlRS1lC}!9NsNzs80K+GFL(d%K8RA-*~|wzxK2D- zg3DLIaz-_u90&b-GGm*W%~D!2_%Z^Hdi*=sl*DC_Sqzn7LutSMfibL-L*^{-(kL*u z-7uP1-_|&1EoN5cclYzM__HsNDL?ij@Bm=g_u!X;($^QuA;PwxL#2DZuOH!#!X?n| zbA#ebkXrHeD=@HMvWqWsFgX%4UOeV`pj$6(L0?Dt!%wqPx+Oh^(QE9D z6~ug0iYt1N?ZPZAFE3F0*kjJ4#FJvivn#dh+DMxLcVg!U4h$7#VOL8kmz+wy>c%$l z!tASK!K^o=Gm`3fb?v39lJm>G9?CMIsv7qWyp~`@F>~?kQ?$t#JmD-s`Y0$PM5a;L zKVbuvI=Ob>i5t~WL;i4eo+NN~y&Ch5Tb`Q`Q23Ddt=6HXP4N|v(=Fp@Nza=FOx;1-v4YyV=3%zhE8p!*n$Hdc`L2*A_x6@ z!wcs5gPf9N6u!drbWfX0K!|InZG6;7aFZOZWn8p%LGS3<_M!)wErNh|jUuvcV~3hp zfRdkGoGbawqv9Xb0U;2(fN$q~JK(RkX0f|{cOaeanCT9vBz=*jj`bMRZq@9TO1RhX zXHZu3I|%t*tylG7n`#SR_kA#s8xrO(UObWE)If_Le-m?~MQ=XSIs(TCg4a>FmHm8g zu(h=b_I{oVX|qZ!^Rsy_pZr3;PZpi^3=An7D}lWpPo&xjTD}>`SsIJXQIs|_Kc{-? zqmQ|mBTT|6rAm4fZ{VP8pHM+v8W2eyFMDe)b~~-$Y_G6Qn)|5j29c?hm*_=HWq_6= z!W=Updn8Jm_>`;2so27Pvgp}%G*^f85O-mHkdMLeI64d#g?2F7r7)ol1`Di!MF*I5 zx!6tr3gT^f#$K;>n%a%ot}|TzM4VP)H^A21ejK(2*vDPitsI!Y9|w#7kUSLe?72#v zY#jfwkav>W^~Wg7pt*mI1RHo358EHy4Iy6_4EV3TfKy{xuk?vcb`hss!yJVC-&`>|h6%ht`%Wk?C0olF1pm496i=bI9g>&A2=j zIy4el#Z)C_8Rvv%J?SrLOwe4v;ch|d(A-q_P|^-}jjdcDg26YkTb)=Pzt)wj?+bqR za1Wdd6VCnuaizfs#87a>s zgx7_3=bQDuY85Q^R7Y0Wa1{xyC5+hRX=am-?WV-76P%=q zo{!%zCZfX6+LiDTJZ(>vD4Qu18Q3Ds?b41dSPrJXsDS#A&(v<7a*(%8OIy?j!rTkJ zfo6m;KV!wCU1la)3*TxVIDhG0-`a^&{kg(e=2WpmS!Y2xBZxjhn~ZMpr1N(Tqez{N zJMdUFokYjct2D@CBUn%+GtxoVMYff?@@etC_lx%UP=j48e^m(nE zhld*XNZsq4J6EFEP$4DiDP_OlSY$q4oTF^_7qo*OH-vQ1CB@na;Ftj5{*sx5p7aCtG-ECUcQnnK6aNkp^x%TU%N z%wLmuYW4V5+R%Fa+bWoG^GJVEtXKJR;~8HNFlca+;zejaZ6VU9%8y#e4MK~tZv4TJ zgp5LXN$~w3SM^)tyak~@TjU}tx|UvP<4_N$SQV?|;|&9yLJ z(^9y@@8E)xH}6*GspLw%c@UyHQesOueb79iY&2(HKiq-1B{*inMNg$;yoEG8I>dS8^^KO%EotdO;R`|jLHyeH zx|hRc-nT#NXs%7D(GR#8QDokrwJWh>`1P7egChUaeDgZS-3mVkm60bZdget2@hSIO z6eTOVzWAC-M|KVrsX;XbJc{HW5Wtz_Sfn$kJmw=iK)S^E2^vj^Wx)-v+xV z({?>vX-)y=Fk6H+*#S^6I}%rk+3q_F69$HV@F$vtFTC9b-i(3VBKHrp+8x#L`thm+ zCnD1Qp0it_@F($+i=Q(gh2sq{rS65adpn=r+m##}6fp7-yVxo<5`5hG+P8{T6~`!L zbHv%v5HA~J?uMv$zquq;>yWN)u||fhT~4)5LCos~^P;omfUH9Z`t#k(vA47G<2!8e zIsFQ`5R}p_B!bebIG%s^p-Lk9;vVG*aLS0@$%&LYY^PEmu*}TtSeIsN&<~XfTLzVU zVh)nFcN7`jd9Qm#9EKKKVy=%UIwe`STXm0oQ$CS#GGrU5KYrSA5316^&>dXnH_wga zm>qc6QVq+5+fSa1Bi@3OBdxQXvUUdN2Uq9Ia4~sm`vJJr3k!V%g zYF226<&CuuaiivujF(KkVmGu=7eUy~A1=(5V0zZ`KMs7|0cSM)w+8SdRGUPcu78Wa+|>s8`i5kbL76Fhj@da~Ye774-dmjCgQK zJbN#v5_XgLgjk;$OT3M6eTO47c0LU1xS<&(b+|0Fq9`+JlcTZjEFnRD{URSAiSHLZ zMz|!MX1L^i_sZA`mrK!AK6%e%d)Kq! 
z$4(}@fbr<=1I5@5Cn|DBGFXs(%$LGWJukiK2$%y5z{c}4LG#|;4Ytk?+W)UV<=|?# zqJB{bXukX(1~~s??HwrgQGPVqMor_pvzc_}eUq@=Ro8a?VNw2rNdfs_rt80UgUg!z zM$Som@VTh9;BwmQcJr7`7YZf>t)_(owDPu&PmjN)+Z=REXltr~QbmDIw$L&Pb znTmG+DmJ;!mY8eRd2hjyp4aOU&~hN$*t_#^hwVW3q7s^u*Dsx|Z>fK?@p=EoYq@^} zUKg?!Fyo28dBR`s=y&0@zf)cOn|+J3hKsWh+LUi+v9qI_DZk3oQ$5Oxv%Zg7^=r56 zVMNhO5?jPTY}yyp<+jY5<)|!*1wAHFjuu@s{|hZZ{h+3~_z+Uw(^dUlm@hx7jQMfLsf)t|Rks=-O*-_jDGzfBx zh748e{rbl0YLj8_`lJnAg&$wvZ{IMTJif=#*79WR94XE8u*w_tZo9t8_cU))B!gF$pa)c#>vx7Mvt=!f|x9 z9)avMz&&MO!p;#zfMHRn_@q>K>zs_aw1s~K>)B0TPfvd-@^xn*MENx3>gqb}dO2U# zPHlu?9!yQ`I5t$`SRbY%WmKCdr|s{~33(G_%Q!|y`CKe_5N%ojo#K&iHaT?%%}3Id zN6fc0kB+39--kPyJC?15xl}1%r1=_K`^YAHNYndD+O1jo_^Ea9ir$X$SnvR`AL&0Z zdq3q1gkf1$=zSK3Uiw)Jl`Y}L%Z+*VH@^k?7k~1gxSf*rtgp}VTVj0XJV&VMww zHNSb@1GIAQ6TrO^+53L;Ox)#ERcvvI5->}?B;=OuZ=OlL-#kN>W;XrYa`?`--#j}9 zUkdi%nSX6&+yx6*0{gdfy_nPJ94LdQG@s3%shBD*z}Jru&Yynf!b2_K$=qAIH^=F_ zMhrAPk9d`Ao;4z=&75!WGHFaymlC@od8G5DiG%qp28(Uq zUsbgZn@xaK3lzh-SD@F`^~Y0m9w@JWAdcBmT9>to5E*g6EB5}@R0BqARp!gGpDyogWHon zT1K??`$|||DnW=CeSF9%(v0|=ZWwM+Mha-_SHxT0#}9pa6{k9OHUa<<2MB}09}1a^ z?K`_&P7X86(3gxOYz5cM-EC|CJ zOS4($Tmj>%CI>qp4`c!ZjVo%$0g%Ss6+38&-F&Hwr6p?Y>k zM?ViWTwOT182WDtz0Ushieb!PYPNv*LO&M4B7d;2=ZvK=97otW7|k~#hoU05=-LcXAHUYX-yV~rpZVU>mbPh2rvO=@b(asqzb(&=z zs5pX>XAR7yj8EWVmES)_wnu|2&W_p7n-n=u_zqXB$rSiZKj#Kl>Hs8Mf+5MNhj|Ig zes;zkMenC4KT)W$ko<9b^mU8Q95xOkl;s?(*V;&E!Ci5`M_zZ1o?9Lmpsuwzl>l3E ztt)i=L;NxzGP&9S$*s;LSx^0Z_aG^q?oS268rc=f5s&G228~hcZ8A*ni}gczqw`$l z360G}sDPP<=9;uyq-|r%xO)9TgbQk2mgUxv#A2KP5Qm-@$e@>{XLiV3a%-lI;}YbC zZ)UsJ)y7|Is*%!rx``D!9ybuS_9}ZY^@@d)pi#K+{eJ>(@y$G=p4$)OW4e+&B;pp< z<-n}RCSt#8(~0Ya&B>;tRfNr^6nXFKc_EF$1i#hNkM>$T2U!Q{Z9>$A#Av?4AeUba zzR6`ziV~_!_wfCSIZQp1#q8ar#st@(&{ii_oOY9Meuzgwu7a zDwZ549qaaGjQGbJzS+_xq}%%2VZXYWigj2BKZt49oZ7Zxs?QB3K~-w$>OTOCLWz}1 z?sQ!-tTNzN+T_o@xNi3rp)qnjz}x@39Kvik;I>8B(aVLM{70ZMc-2@`D*y9q6`8ZX z6S-1b_pVx^9nhA3&(huS$2+`giXz*`i}poCl+w-LI;ez)*_NoF)4aSXDPQYp>ArJx zyAmlv{)?fai|KrK(|qUa+)Q-sI&XN|T8k*|yD(yPgkc_J_pS6bQHvOm{Oc@)sW{2W zoPuqx%t~=izl!_yG2rG_%YN$!26T|6D40lMS^M^{ahslx_59qpXF3%Q#d`JiL;Zth z^cFn2rFpl|jyGc&Ilp<}RnZk6KJ=}&;Ul9(GP7U&{JFsb{XyeGUE?VmW3g{C?f+F> z{F~3b{g4KU)f(CgV}z3fBrpRIk!+iqAbU%X;_j{g00iLBQ%7`*{wAW0PRHuaEg7eW?rk8^7|M+WE4YQN6_UcACd#98js?GHj;7tO); zc=%m}tqtyBQb?Kirk8M{fLZvVWM>7eyBa%Faur=_0W`iiQvWo6OmxqBuPQyX9Zqw2 z3(S~D?MK=UW7}Q1QpPNC^fd{`ox`y#v!!`gI%JiXKJ#>$#t;8Q`2$INj05^=Uj zyv+z8vtfEq*Hb$HQ&Y>kwu}0q{zC}%03eX>l8YMy#j6hI*t&aZTpd~3R&v}UWdGCX z(>o)6a6(5IU?`I=uU{a_&{DL8;@rSe=L^^nULa1>w$`ann>=kA5_m|64bL(wir$}c zWiCnC!}MCbbPXRioH!>XIMv8aIx!!^;X@yZy1_3Z3t(249%2t z-ARvxBECXCxUfQj_f8ee56e`dbi$(@VZz57n6Y$0i)i4GYy>X;$Vd!P;@4a`BRA&R zEagy~oPY7>@=oHyW}tfcNn0zkjRL#3VisJPK34c|p5Yws2w>gN0HEy!^Ao_(h64L# z>H(INSI}HCH*+bS6$S?q02=b6Z96D4X~TbK>BaeAPND7Mt{2*-n5sQU%1JRUPvA-`!im?LE@B6Po5zKj@fFpk82ao>bp9+;r!R_FD9CpBd_^*2?o`;Q~Q<{QM ziAhH8<;-{KT^r` zvX;s2FK&a6lVIGVnWW!5n{c9^AlpF7`!~-SvF|&TZEE9qBPZ`Ub=Pa7t9YFtWYLm~ z&Nd@#G&omdDe=khvWssVo8tF~zx7IXN0hALY0<#W_kFiD?+~0fkA~!itiG0Oj{Wv# zLi0z6ylZubop_niI##4K=&MmE-_Vg_xhUh1;rT<{wBsfjn$motz6QbRF3-*EN36vGjuwbHeO*{xX~P7mu_YiU{|C(wwQt#DS29Q zv^g=T7hQws%L)v-hwKN;T`U^*o5z^NcnNsD{9^3SnlZn5z7sKH5N5FUAorH) z@aAdYg6SXcs_yNcXZXTsO4@!xbIUiqrAGER?pog5bvqy4TUPNz&BV^E$nyj7xc1NF7+GE_EW5UU$zlr*Yt0&ZM zuab!>$?TJO|A!irbqhpS$;!jlKPW1Ncnp@}h|aCzPBg7A^dYqd|JB`g?;#~m!9a}0 zpu}}1W5o?DV8w0&ZHfCcQj<_+RL6YRJ(TS^6A@&Y8MW2G&|S}DR5OJC2I6(S_9)!m z-!Wxe^JYVqPa)2V@McseJlBunN5Ub>e)RpI^Vr-fpuEheD>vx(N}i_XyX2_;O+}xx4(YWkc-%3TbF#90DDire-Mxg| z>oYl9`A>Sw#tRI>+?5;>eKsue9L{CiZmSeIwqDUUCYz}Q+{J9l_lph=0wXsFI~hy6 
z@*h~iJ2(fNjpYA~75{fV-&g1p6@B73fEL^Ur+7g}-c)a=47-vImVm+|m>g{OF?ItS z;!H(6C;MB}Z=RCG0OB>eS z**rb&V*KP)&0gidul#kRc~H=IMwMBX@AC(O#nKh$EV~=$)ph$iD~eFwJs5dfOcYz+ zv&yxnbZJLCDxVp8ZL})CVYD(py$V=8>SaZN6mxg?FTERq3-doenm9Y=iSU{i60_@jTF|s&CeP+LD`y@}~x*Qa`*dMqs;Tk4v0Bc@ZMK(qBqK z)b#zzC=;m9PjVWVyfGljBYojpq9OR%6935u)Je8ysrp!Et$>KlWY+wJde*IC8=0aw zS7SAssm0z}$EKh|=T)_z1i)|VfLlXYy_N zt_8N)_$psCD>K(9!gXiex9L~5k4U?hb+5#syVf6KHG)t!F=L62_|AOjK1UtKWRaOz zV_*VgK7F+=*~Zmb&FR{P!+KO&ZI4j~?oMh*&r}}j#`E@XVIF?GQ*X1_FF5(c1s!0M zB*+3oP)gg}LTt@=IJO-$u65MGZtOlZ6ndKO6p!Yo8(P!v2>C*_CpB9vOHodp9%92p zs$9fxp39Ruz8WE(MawQB^_=t{&w@g9TAXgrFg|q;s(@VQA<9M;utALBya4m>;1srn zeOhik^j15=*Q(w;t$euCv!X>qF41MU(;(!8c{(cGfJRUmE;ALfD$+L*{TxP57Yje z-^zcD75<-m-lOU^M1YBW2dOMA76rZ@$A@&{%_?;s3c8I}=^!8u)o*Mw#a%L9UcD&sMwRFQ4zCXe8Zz<@GVd@hftwzI{YE{NMB);)eYmd%ebeQsdi%k|3n<7 zI5j3w+(wNAX(c3sKA(mxlfvo_FX%a-K3qB7F9952ckb9qU*1MCmB6-#!?A_dxqT(w z)|}I59Xwg(-`Day+#oEd$P~XN&JB{o8A)_|TqA`o($kTG0=9gA9tkXp?N0COCd1>6 zbaw_jAxDxXa!3?( zKQnty@TrFi-K!IJuqC*`#uvyVH9K-uFHK6DwT3@@@I&CzwZ^0wl0|cC!iRH&Yai%? z>~k@H%8h&{1{^4X?U~@c`Oym{xO$pC*$%6-UbCbT=dC**YIeB{Mxg&{JKF$+t+UUy z<8fkcE~rLoPdr;p#AnFAIpbO3xvo~q)2bi0SOEn{;c1dqX`X{c7$#yGoHPK*$s^M2 zkSsb&6|^$OJfT4Ej-3VQCIRxOO#vm{ou|kQVVz@Ul4d078kvKMxTB4g{?6U=cHBt4)Xjv?)&IVD-RDp5FArZ{eFLk&(nTiHfnA7mDR%C{H1=l( zK7`4O09W3HjDYh=J7cE^%RTR8#SN0#8hy9}J@oLkk}LHrQ?}ZhP37M_HL2Y*0qe{w zli=h%FI7S|Zv4?Oxd;}CxUe8oz7SgndpymMLr%SCJHcd(&KO+)_*C><$DeSgj#lU{ zW>yHYQN0rX*AS?NPRNR|%si{X(|LP$sMvenRd4nYsh_M%+=DDn%!v)-?QUFrXr3d7 z>qHLmKiJaBvcvSVLRZ~C4U7-)u06yY2MfLpyOGqohugc@1>tCpsxmhId*;@^);#@z zgn@h`nX<7Dye(6(WN>-pAjg`Z!ow==n0IS7NN78rbbU*6t^dK9+UcGibPo3aC{q=Qp+}zf(cO;8Y z*i=#pj0lH8>)>)or7}xqfLMkKm?6&@de_jcBCD=!{Cb{3F(oE(Q4)Q7f2H>9F_m3a z_$K|{ZM?ifKCeC;{PelQVd_7C7g=QJgW)gIwmavl*g?xfbrs4P`jxgmUmNr#l)8m0 zrFjj?%mvIvO0%PuM(^hk!;Q(B1(wPWF1mMocvB*2soz15C*anVzvDi@uGCRA|L?1` zZNZoS3+%lM#+1XeUzRYmL1S*(3Ystv6?_2G6c9uu)MQR71-wGbdc~8V?P?dpF6@KV zlyI`nt#jk(4&k94uwU}9L)g_kCMYUwT6Np3L2LYMMw;6R+Wal#`Ckc(9H0yS&GWYo zaqG;B?+!l~yK(zh9xw`C`eUb>L*v=<1xq_cZTMM)Q@0+MxAx6gzycv$J)C7Vp|7^S zKxmBqHn=MF&h`4pLeZmGh4lHI!)?J#Ds>+Soflv>Gk}dLH}&Pza)t=Xb*{K5u!w0p zx6HM0eYzEI@y2oPOUPRkLZR;Rn3|?~-oTtK(firndg|g3r^(o7d4!P~I;^_REy>qB+tsIpyPN@?qi z6XG0<8r}4gnfwkaIboz#D8PHEsoJMf+beEHX>+`$31^)*A>`vSab2#d{`rNFfNs_I z_ucbVPrcrFV;${rl0e5kYB@cpDj=J6^p+hKsd96XCMu) z=Bc7iyczx4kb6A#mJmMF{mnt;*}HZQ$`f`So`vNj1zQgs>`QI^iVVb)Yf5x{FJ`Je z%yEgzRL|Ii=yq>!w`@SUh9FnGmJC&mmN-vzLR>35VpP(D;eCq{yP6@(@>cHu-R zOefdz;Byx?@q#`sbttTH7VB~|;qAs&#TP@n8dv=y-L3tI9NFZj ztj1(pKMT2l4GKfv{Wf#`7qczTGvS?QZDm8WM%C3pX};^tZ?2k-dByb6#+Gl;Ex!qS z^zqsWsi53+fPTu|*}O=}z1OjF=V&?8@2r}<%Fhffp`%W@d_>3KYQjyGE!+gIDJI=X zO53;BQvbt%(A$8SbSf|;&biL*c5zx0(@imgSNK3>hwd1ezRGoFNx6d4befrbmFC#6 zv8-q{PVVe`7^u+1*M_?{>e7#tbqEk&xXadOAqRuzAu&?0q&8k4i5`sLst*Dsz@DC+ zfdXWRZv|yJVpwh8K)UP;zX_>_U$-YB$n-7A*B-q;M(r}KB6`hkHU6^ef$#!T&0U-q zAW91oz0;71g?0F%jc19%Y&E(^eii6~TE;xz(kV}Xd=O#YW7%S9PUci1f-h;;`D8hy zO*Xis(Jl#pewVyz!#3dLL!nrb)^`tW8gI0b7;T>Fz8Jv{DMxLPy6;O(GZ(ZGidGLb zWCTgo)6@PQGO5=HPH{gb?W3iID%cw|{(QDUNbaJh!dn4iAICnwtcb8jou9V1M=wS?)O zOqQEmyGg47v-DfY-Lp+|r$r)i59!>|kVQSeaYCuXW9^D%$Qm7W{hI5tXI<`(T0KqmyAj=HMw^=~3S z#?DAT9Wi_^nV+Q#%YyK25tEuh3=JYtk19yAOe>EF>Y@QgtqS7($F?Z*c_`@ ze-JJarsm`@v1Qn5J2x?4Cajl1v9@?#DNZd&UbDP2dGYL%Ic>%pSFJ&bB;S4edZk53 z`6NCn-3VJBayOW^>EoFHVv0JqDZw|gX8cN4?G#|!?A!(`YlSHXDX{J*< zS9Z7xS>!5U#X6t2=xQ)fP)fdV^Wpu>B`@ij7LqNYHBKEmd6i>M%PV-jJR>!<1ruT3 zLBTrp$cUhvNv6Bfp-5EMn24^cnPW${yUIPWE$W64L=wp|=%@t~tv-s7Y z2?~c61R z|J`Q{K)2g38EMT~`ZECJ8DYLm){Cj{cQvO`x(DL{yfU`guK-5Tt%nc+!PYTmh@dJD z@^l}fX!^5}h?)r8V(&e|naVredV3gB0J`S9S`%d7Gd8Q 
z+ro;XooZ;8rD7tsu!m-txNfe#rJcjb>ZZCPwGgA+G*ad2LDUtmUa9pHa-Dt`fc$Ya z$V`(2nC5nAu9RZ81#VZ>VwlCePhQDHi zHe~N@raPWHZ~+ab@8&h$pFN;io5Xa%Hk|;^Zp~KVieP_c z6>yKg1_2J=@!T;S(}4}hGAVs+YG(D{1C4HZt}N{uEKss}_&Nmw{Pvk0sEc;&=$`TsB>@o$QBAhCWg z$UL#levF+h#4$3#n-lcCc58f(pp2HWAL}iybLXUfUTGVxmsb7o12OMY($p|U@J3Ax zUaxJn@L4O2oBwE^H|1VZ+vKdi@Y(L;op|ICpT?{fvOsZ(Z(!B=ZK3`ml2Z7m!ul_a zwD6FRaYG2C&Xrz-VNE3bn!PF_pjI;Dfa8tpFC0$_3sX>;d`C05a@OZo22lo+aN-lZ zMIh<;CvRc}W&!SEEMTi91Tb%vy{<9AV-*Bafu=xz92)!izQy=l#Na5$2R8@9<>GYv1>0$JjZoY1$147x;f*D#N?W$|7ENKr1z*vlRVyw| z(Oq@^+l-y_Eob8GN?}t`?T<`u3xz!n{dLi&aczKdT5CJXZ`zgl>Y;UqldN<@4*4@P z(lXm_+~~RNfS};XdozCf@)lebuMv`0@7KEKKl5rzOu3%Z?fP9AF^fau4%s0FD7xCI z0fD)1$L7yUc56M(izwvz&p(HFczJlv@*L)oQRbN6OB<`$7!AmdsZGdt(a?JGwMgH} zhR~#sJkV`^CZp2{zCld(#~EU1Or(A*NF6{;Q9Xr zA}vLnjc&f&_7~XSPH;&Eht3p=_+x7~T`t>QIoflmIlbrU>U_remo-<&fBz5_S2?^( z2S1C8Z4;tFq6mUKXp`}(mR?zJUsKyAvBEyZpmM4RBs z(Xt&aZmw%4yosHEku<9at_FIjUTHv#<`EglED0_cFyJ7YM^`n?rKhD0>yFB4g&+jl z&wo-o(wk&2yz6*i&Y?}bh<0zq{AtnCsH>L8pmK?PZ=8EXV#=Q8J4U&T(GTjF)bouk zk5*Y{i6j(BUJNR+mAV+X5p=8p)tlyTgO>u0K#@YZZMqBi&fT)x`L=2EXYHfrl|Gb6 znx9D~$^QaHI{ze~MMa6c*x4V0zj>0J(wKc+@cn4Jd23!~6IThX(;5TztsO7!V{le- zLa&0!FvED&`0Ovr^391L;jrUQ#!(#+fsqf$;}O@N_+2K}{^t24CBfMhjAKhU|K_1* z!K$@2cXltqm>P8;m`G=S4i&1^HEJJt^kZ?6j$@)1r1YElxRQOMi|ub7Op2dYQcaro zK}Drrb;F*%3KFw?_-k2wd#}p1+_s8Xk1c)WuHQVZ+N%>lO^DDQ9o?Wq_0f`v!e_yt z@tL#p4p-(aZ+{RH;&Hg_aJRht<-Tiw7Ub^S|M`I~3o`ac0qDdXyf*tLuadc!tlz)&J}|IOb#{oin(o>O)XwS%t}z)iA}2au7C6BKa_3}i?` z9>3TcJiu11p?4R}VI^EI>9J1~*a4azLUJ*U(f(pN@A}Z%DX`SK+{5_xMvvD`zv8gH zb%i%S0`KJixrc(|Pmu&#%mi9aUpRF0py=sBo4-Dsy)vXy`azLrSBfR((!Yjl(G?nb zaP#IWkFyCTnj&hBazzGKQDU&0rh-z5MGCK95&z(|TZeyU(s2=NO}Ze- z0QjzW7fi2kB_|xmb}t&21B018vi?%o->XRCo-ApJq}p*YDcVystR1l)31Z4V-YB8&a0OPSp3sDrO)q zesy!Z`mFC{rdE@MSM(9DJ0|;2lWmMe%WU)aStY6j`b)w*GdkIM%?o|mEbA5ploGnqI_~ezt{Hu+q5FZP=zUDQOBtN zpMn10m0{rfkZjCJPCi%O2TpT@@nQxtF**&SSnp{ZQ(>~(nhXJ!)h#1vO@TtJaEm0% zj4e*n%?1X1;-7s_bU8Ybts{V~u)D!Sc_U-qsIPHb^@bb;gh_8NaHB^8Im2rXfrx}W z-4SfZ9YhC>VZAB*#P;j309YJE90a1hrJA(TV=gmIS*~nvW|JbY`PXHi9^#&0N_XM} zjC5czL~qz`XC`rOuoDL$Apx!;s^t1)@o2EW;W>@vb%&N35VkhMi2%hO83OEwdb$Rm zv>7lFv+IY^7r{<=7wyQ2+_G51!s!8-^T~pr+4nfFxQCed9uFPbR$KgRBKGP)B+Y^Z z5pwMaz2|>F>j_b0y1g0ERs7=Rj9;GmP^;FQ^(vO0OtoIS^a?YHtJ%$#u}?E2X)9Fkry+P@v;+v2g(`uEXbK<*wk~~}w4ZJo-=dZnPHUW# zh+#X-4hj{&2=dHdmW7P!IP;w5@}Tc<@-ZS@3RDf;aV&QTCK9tIn2#!8cxE!{IC)$n zf8ZqK{RpG@C);E$Ac~d>0PO5 zy7a^>wv7BeCeIj5f;1*awh6NT9&6jjgo8{$A(koHn1&^tQ=QlBYE@)}(J~*iEhO6E zQQYI>XyXE=YiAqm4SldPImY|iINCjAc3skf3W=r&%rhU;k)0Dzu?fvhIFARJUl|#O zoZ4o%GJ&wW9-a&YWaI)zCKA%!kDYy;2wqnW5HNAmkq(Q4O0tESs^0U;(37az4;avg zl4N(&A)SAEH4FN(PgY|r+aZXyNd1vr@_j}CM7#v!1Wwlw$G4!cIf>P2uvm0B_%=Pv zD6Cd~dv$w`Uyxiuu*I;w&->D(Wk1L{EyRDh0!s*NLJbK4|AT{A1!?j0{SZ)HtI-yPX_{ zof8yep6TB1lvNo*pQBd6943%$9k{FP{opmu1i;8k?rGGCS!8sJKHa|qYDn*S3^hQ# z;z!2HiVUNbk<@i*?nj~sTaQ*UKlu(R(qZIO>MstHYLi-#Jw+RQx@9TecV(>QcqK~E zb(HNzwU%Z^UU`#b_bb^G^a!r>#u%;;Xk^X?7&+633>i)hRE67(t6hu-8W(&Vd>_nT z;Dim(rCC4S(M!-l?q|N5oHHfiBv&B2+tgR!g3uEv=}zuJdk@8AOUg+%}a$uQUrthtzGMAn(cVm?*;ELTJ)ZndO>NnG4mM>Nu|U=<@_I* ze;&|2C5UAHIX$hI=jhgRxE^F8@WKavAiFqeZO6DBP$5pyXa`ILBZSr&VxK5NsTy+% zVE%*A4|>*40G~ctAv&;OfGRMXVF?c)?fm|Od}yt4BgGQmi5Z?@E`-^5-nYe=nyG3H7BRof4bpr}JC=-PAnA8-0%#rjO)8Q^0neQvS~_rkkj&eWcZQidON?w2 zuVd=H1j&S%bmJ{SnPrL?Q~`aOrbgj63JjdJj&0sw4?Jh?p-MUo#4|&~Kmu?ws4bW$ zK~A>+Ala3g9A#ufB}QZG8Ua2oZ@szzUOWcVB+J6j1)l`gQ#Jz&Ch;zyDo8U5z~?|C zz62HIKyyql2EGWW;|&e;>*HvPzyfB;l`-wL{llv|;j1`uBtx9j28a$I`vA=^P^=xr zyod4*vHAZv*}(+3`%zkEbcwY+G|hQf50HfbM-@rBSxY#}*phBRl^`LH`r`Q5a4Ln@ z4}0JTW;e2Zu1AvWNuqP1NO=65LS78Er6xHW6iH3pp_Gz9lowq|U7AL`#VwRBZF@aA?_v4DL 
z0_Km!PFc~3zZ$CP0Wta3<-QgV83Gt5Zac2Jt$<~~c4pSmOWZg>SNb=6%7XhRF-nix z69whN09&7ygWiR{83y7Nm8i(BnX~luHD1M4I*oU(u~Wdm$0c_$rV>OH5Tgn3|F5%a zjcVdd!%k)>XMh3Kj!!kDoLgi9f{tYW2@ zA_^*EL_jJH8SbgIHgXLFVRC6J2q`lygxO>=?$@3@XZJ@vXV3nde>3Ns?|a|(d!Ofh z9#Pza9@wFBBxZ;#>120R=|FEf@;}xgueoyqiV87B+br{s0(xGN9P5#$VSvkVfMV9L z$KHYZv$;j4C*fqa$N9iDr0?F#L$zz~6U;3Ch#N`jBcsZ?kF-}h+PyCYDnU&^ ztygS(ViBL_4fkph1Aydg3v|pb{#*>Cw$IUo0wjw!mkKfmFz38HF*1QI5W2vp2(*BL zG6+2=Mo5u%zl;L_MgV9+3^GGtNf!cC_66R5?#^;>J&${y*N`TbB@oG6P-s}gs|QjW)>5l0UrHD<4vy}_ncytsA`2+LJKMF` z0znT)y+Z3#tP9pkmn6Hn1Ovnra=~V=r zlG>qqR=)qrQ+od3+%Da)@k+n&9f>VjtOZ53Bbvm`L|tX>x^bE3m!Z{(pT)qweKpe~ z@zgu;uA8y3iPAFxn1ozI+s-jZVWfw4p2=Eql0dXV0K?92AU(hwtdFG{)yBZ@c9$9( zEf?Wge=!0eQ1w5ly$|)W96DmRvsex?_2WYvi^i_BP-<9<5Ypy>{ z&>bZlKdx%P@?-}Qor-X))_XpAT=`7+#4pLJ4^72}GCHsZUC$SiqsV-6cw0$_+Z%+#XkMCI!}UQy zQ8=qm&v!P?*AI@S`6^cz2d z7FgIB86gCny7^q3Bc6PB?T%!#*c$e`pIS7S)D)}!LSx_Pk}bUw z{WO+tdp764#*1#RyLafkQa?kOM^y>#gpIIrD{5Bk*`LZQTNI1zuL^Z0~X zre`s+&awehj!2hc9oaLr>QhfeK=qLmk&^BywTFhEi6o-uCC=zEUf!nBPz|eyw+Lwa zRdp)DFZ-Q~!h#4)kb{>IEs6f=e03Hld?#Tx<{#H{!gBYth5i|~8N67(pxoJ{LT&nY}hSZiLivGUlM zs2(c3wvdoyQLH=3O9f0_NhebcbqZIg;Bc)W@I_~wZC%fq;X+Q>KgBw!p)>(780Omn zMMY~E0WWR!rIrtCr&=s4jQg8N8~k}PhVMqL8VjOzyw(Y&0~Oi_1~$_s^~6CPB#u#b z444j{b#o@XWNc6Y)$RN9)1NM126UvS<^ZI1FA#JAQ4O5^Lg6NOE_qf{$O^}K=VdA8 zlkh?yY*pRPYv32sZwlD@ofje>;nA&^$dq!yE)B1Xn@hWP=j7GEp*@^LPA;35IkPzJ zAmNcEdvkW_nCXnDSHeJ!LT>OnO9R4}sD0!5DUS>dPkog&w3{Qk8h7b-a>=!g-HH}8 z%Y>b$qTuA@TVqk|L&Zt#hd24Bw6on{C)ouUuN8Cel&zoix^W)53O#59^VRWi7@?x< z)@%b@m+Z!FPGVUGZ5wd|4(W^53iCAapxXDgl-=d4j31I91HaMLd_5NU9*d|Akga+Q zvF89yB1GG~<$&8;=ADCRHtY1cw|l4h$r<7>s3H$}SgtVh!B|5VkS)&d)%TE)d0{&Y z?=LZ4Z6?!cPjD1V%n-fY>g!e_*+E8$IKg@#*zF7ZZkIQD_A0!asL*G+}$kU68+c%zz? zC-c%bd$h%Q)2)P4_6DyRXu*U!sw%_Ye-^m)_`n$wV?BA(%%}V9UhvwhH^e~{Jw7eX6D`WOL5 z^kJ4txdYwnGHaLsQHhX~X?Nhi03JSXp}-g1*e?aXGZ7=XAa@YSxwwn+R0{Cz# z{b`x`IHH}gWFsbbPoVz5pgr52G4(w=;8%%sYll(PkQX2WWOO0H$FmLx})+t*ZoSl)8Y~R z@ppNSf&y-jH$M+`mAfzY*!F-dBjUoX+6RJn_9Wo^3)6%CZ&|B3QU4ae;yC$7n0@FW z?7l<+6})2n-ruDc;gDgqIY8Kbjon9Yq)u3}{WFLKpwXJ|znW<8$aDZ55in73O!sqL zRnn+UkD|kTE1q!bkYu5DnjU}ev$`?9EL6(}cALnuBuB@QRbU}<=#kvHu}JH3&fjj7 zeK7PHrk|QzmzMgVXk~m8PyK=SSGhjhPOV>c@q{+gHF^Tj5dUcf)|F{_gpS)_4@ES@ z8wB{#^?hPefEd6s@7Qgr=inx~4Yk-^0(x_d-v#IX=m%H)(XSPvpi5OWD``Y1`-;w` zfdx3wv1ui$f{8<8=71cOFdJ(u7b$BT4f>2f5>ic;MgWI+Po1hrdfIHWa^uF+s4WRa z53tG$H4h!X18GS#jKoU@pXgYvJO}u&r3!#uDDd_oRpwN>=ZUtIn=kjqKPtX;9I=%< zQEYrq9}jHrNrRbloc+AfWwa14X*o5)?y3ls)_il#lPuPaMC>EbPcEJDE7;}bJDvJ^ MxrNusFuWT27uS&q2><{9 literal 0 HcmV?d00001 diff --git a/docs/imgs/uintr.drawio b/docs/imgs/uintr.drawio new file mode 100644 index 0000000..1037941 --- /dev/null +++ b/docs/imgs/uintr.drawio @@ -0,0 +1,87 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/imgs/uintr.svg b/docs/imgs/uintr.svg new file mode 100644 index 0000000..1bfc1f1 --- /dev/null +++ b/docs/imgs/uintr.svg @@ -0,0 +1,4 @@ + + + +
[docs/imgs/uintr.svg: flow diagram of user-interrupt delivery. Labels: SENDUIPI -> Set UPID.PIR; If SN==ON==0 -> Send ordinary interrupt; If APIC.V==UINV -> Copy UPID.PIR to tempPIR; If tempPIR != 0 -> Copy tempPIR to UIRR; While UIRR != 0, if UIF != 0 && CPL==3 -> Delivery (Handler); Set UPID.SN; Set UINV; Hardware / Software]
\ No newline at end of file
diff --git a/docs/sosp24-ae.md b/docs/sosp24-ae.md new file mode 100644 index 0000000..e819df4 --- /dev/null +++ b/docs/sosp24-ae.md @@ -0,0 +1,416 @@

# Skyloft SOSP '24 Artifact Evaluation

## 1. Overview

### 1.1 Artifact Directory Layout

### 1.2 Main Experiments

| Experiments        | Figure/Table              | Runtime | Description                                                                            |
| ------------------ | ------------------------- | ------- | -------------------------------------------------------------------------------------- |
| schbench           | Figure 4 & Figure 5       |         | Per-CPU scheduling wakeup latency                                                      |
| synthetic-single   | Figure 6(a)               |         | Tail latency for a single synthetic latency-critical (LC) workload                     |
| synthetic-multiple | Figure 6(b) & Figure 6(c) |         | Tail latency and CPU share for co-located synthetic LC and best-effort (BE) workloads  |
| memcached          | Figure 7(a)               |         | Tail latency for a Memcached server                                                    |
| preempt            | Table 5                   |         | Preemption mechanism overhead                                                          |

## 2. System Requirements

### 2.1 Hardware

Server:

- CPU: Intel CPU with User Interrupts support (i.e., 4th Gen or later Intel® Xeon® Scalable processor).
- Network: Intel NIC supported by the DPDK ixgbe driver.

Client:

- Network: Same as the server side.

To achieve low latency and stable results, TurboBoost, CPU idle states, CPU frequency scaling, and transparent hugepages need to be disabled.

For reference, we used the following setup when conducting the evaluations in the paper. We recommend using the same CPU and NIC, since results might depend on core count, clock frequency, etc.:

Server:

- CPU: 2x Intel(R) Xeon(R) Gold 5418Y @ 2.0 GHz
- RAM: 4x 32GB DDR5 RDIMM @ 4400MT/s
- Storage: 1TB PCIe 4.0 NVMe SSD
- NIC: Intel(R) 82599ES NIC
- MB: Super Micro X13DEI

Client:

- CPU: 2x Intel(R) Xeon(R) E5-2683 v4 @ 2.1 GHz
- RAM: 4x 32GB DDR4 RDIMM @ 2400MT/s
- Storage: 1TB SATA SSD
- NIC: Intel(R) 82599ES NIC
- MB: Super Micro X10DRI

The NICs need to be inserted into PCIe slots connected to CPU0. The server and the client are directly connected with an SFP+ fiber-optic cable.

### 2.2 Software

- Linux: We recommend using Ubuntu 22.04 LTS.
- Python: for plotting figures; the `numpy` and `matplotlib` packages are required.
- git
- GCC
- CMake

Other software dependencies are provided in our GitHub repositories, and installation steps are covered in later sections:

- Linux kernel
- DPDK
- schbench
- RocksDB
- Memcached

## 3. Getting Started

**The following instructions are for the Server.**

### 3.0 Check Requirements

Make sure the CPU supports `UINTR`. If it does, the output of the following command is not empty:

```sh
$ cat /proc/cpuinfo | grep -w uintr
flags : fpu vme de ... uintr md_clear serialize tsxldtrk ...
```

### 3.1 Clone the AE Repository

```sh
$ git clone https://github.com/yhtzd/skyloft-sosp24-ae.git
```

### 3.2 Build and Install the Kernel

```sh
$ git clone -b skyloft --depth 1 https://github.com/yhtzd/uintr-linux-kernel.git
$ cd uintr-linux-kernel
$ ./build.sh
```

### 3.3 Configure Kernel Commandline Parameters

Take GRUB as an example:

1. Open `/etc/default/grub`, and add or modify the line:

   ```config
   GRUB_CMDLINE_LINUX="isolcpus=0-23,48-71 nohz_full=0-23,48-71 intel_iommu=off nopat watchdog_thresh=0"
   ```

   This isolates all cores on CPU0 for running `Skyloft`. Core numbers for `isolcpus` and `nohz_full` should be adjusted according to the specific CPU model. You may use `numactl --hardware` or `lstopo` to check which cores belong to CPU0.
   This also disables the IOMMU, which is required for `Skyloft`'s network stack to work.

2. Generate the new GRUB config file and reboot your system:

   ```sh
   $ sudo update-grub2
   $ sudo reboot
   ```

   Remember to select the kernel installed in the previous step when rebooting.

3. Verify that the kernel and parameters are applied (the kernel version string might be different):

   ```sh
   $ cat /proc/cmdline
   BOOT_IMAGE=/vmlinuz-6.0.0-skyloft-nohzfull+ root=UUID=3f07ca35-20a0-41df-a6c3-96786074c290 ro isolcpus=0-23,48-71 nohz_full=0-23,48-71 intel_iommu=off nopat watchdog_thresh=0 quiet splash console=tty0 console=ttyS0,115200n8 vt.handoff=7
   $ uname -r
   6.0.0-skyloft-nohzfull+
   ```

### 3.4 Download Skyloft

```sh
$ git clone https://github.com/yhtzd/skyloft.git
$ cd skyloft
$ git submodule update --init --recursive
```

### 3.5 Setup Hosts

Disable CPU frequency scaling so all cores run at base clock, and set up hugepages:

```sh
$ cd skyloft/scripts
$ ./install_deps.sh
$ ./disable_cpufreq_scaling.sh -c 0-23
$ sudo ./setup_host.sh
```

### 3.6 Install DPDK

Install DPDK v22.11 on the machine:

```sh
$ git clone https://github.com/yhtzd/dpdk.git
$ cd dpdk
$ meson build
$ cd build
$ ninja
$ sudo meson install
```

Show the NIC status:

```sh
$ sudo dpdk-devbind.py -s

Other Network devices
=====================
0000:2a:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection 10fb' unused=ixgbe,vfio-pci,uio_pci_generic
```

Then bind the NIC to the UIO driver:

```sh
$ sudo dpdk-devbind.py --bind=uio_pci_generic 2a:00.0 --force
```

### 3.7 Install the Skyloft Kernel Module

Make sure you have completed the previous steps:

```sh
$ cd skyloft/kmod
$ UINTR=1 make
$ make insmod
```

**The following instructions are for the Client.**

### 3.8 Setup

In this step, we use the DPDK submodule of Shenango.

```sh
# Download Shenango
$ git clone https://github.com/yhtzd/shenango-client.git
$ cd shenango-client
# Download and build DPDK
$ ./dpdk.sh
# Setup hugepages
$ ./scripts/setup_machine
# Setup igb_uio driver
$ cd dpdk
$ ./usertools/dpdk-setup.sh
# Then type 38, 45 and 62
# Bind the uio driver
$ sudo ./usertools/dpdk-devbind.py -s

Network devices using kernel driver
===================================
0000:02:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection 10fb' if=ens2f0 drv=ixgbe unused=igb_uio,uio_pci_generic

$ sudo ./usertools/dpdk-devbind.py --bind=igb_uio 02:00.1

# Build Shenango
$ make clean && make -j

# Build synthetic client
$ cd apps/synthetic
$ cargo build
```

## 4. Run Experiments

Each experiment needs different parameters, such as the number of CPU cores, the preemption quantum, and which CPU is used for IO activities. These parameter profiles are stored in `skyloft/scripts/params` and merged with defaults at compile time by the build script (`skyloft/scripts/build.sh`).

### 4.1 schbench

The `schbench` experiment shows `Skyloft`'s per-CPU scheduler performance by comparing the 99th-percentile wakeup latency reported by the `schbench` scheduler benchmarking tool. This experiment uses 24 CPU cores and runs different numbers of worker threads, ranging from 8 to 96.
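Here, "wakeup latency" means the delay between the moment a waker marks a worker thread runnable and the moment that worker actually starts running. The sketch below only makes that definition concrete with a single condition-variable wakeup; it is not schbench itself, and the file name and build command are assumptions:

```c
/* wakeup_latency.c - toy illustration of a wakeup-latency measurement.
 * NOT schbench: it only shows the idea of timing "marked runnable" -> "running".
 * Build (assumption): gcc -O2 -pthread wakeup_latency.c -o wakeup_latency
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;                 /* protected by lock */
static struct timespec wake_time; /* when the waker signaled */

static uint64_t ns_since(const struct timespec *a, const struct timespec *b)
{
    return (b->tv_sec - a->tv_sec) * 1000000000ULL + (b->tv_nsec - a->tv_nsec);
}

static void *sleeper(void *arg)
{
    struct timespec now;

    pthread_mutex_lock(&lock);
    while (!ready)
        pthread_cond_wait(&cond, &lock); /* sleep until marked runnable */
    pthread_mutex_unlock(&lock);

    clock_gettime(CLOCK_MONOTONIC, &now); /* we are actually running now */
    printf("wakeup latency: %lu ns\n",
           (unsigned long)ns_since(&wake_time, &now));
    return NULL;
}

int main(void)
{
    pthread_t t;
    struct timespec delay = {.tv_sec = 0, .tv_nsec = 10 * 1000 * 1000};

    pthread_create(&t, NULL, sleeper, NULL);

    /* give the sleeper time to block on the condition variable */
    nanosleep(&delay, NULL);

    pthread_mutex_lock(&lock);
    ready = 1;
    clock_gettime(CLOCK_MONOTONIC, &wake_time); /* "marked runnable" timestamp */
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    return 0;
}
```

schbench repeats this kind of measurement across many worker threads and reports latency percentiles; the build targets below vary the scheduling policy and preemption quantum under which those wakeups happen.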
The parameters of each build target are listed as follows:

| Build Target      | Scheduling Policy | Preemption Quantum |
| ----------------- | ----------------- | ------------------ |
| schbench-cfs-50us | CFS               | 50us               |
| schbench-rr-50us  | RR                | 50us               |
| schbench-rr-200us | RR                | 200us              |
| schbench-rr-1ms   | RR                | 1ms                |
| schbench-rr       | FIFO              | No preemption      |

To run this experiment, take `schbench-cfs-50us` as an example:

```sh
cd skyloft
./scripts/build.sh schbench-cfs-50us
./scripts/bench/schbench.sh cfs-50us
```

The results are written into the `results_24_cfs-50us` folder. Data from different numbers of worker threads are summarized in the `all.csv` file, and the output of each run is stored in `.txt` files.

To plot the figures, move the `all.csv` file to the `results/schbench/skyloft_cfs50us` directory (change this path according to the build target), then run the plot scripts:

```sh
mv results_24_cfs-50us/all.csv results/schbench/skyloft_cfs50us/
cd scripts/plots
python3 plot_schbench.py
python3 plot_schbench2.py
```

The figures are saved in `scripts/plots/schbench.pdf` and `scripts/plots/schbench2.pdf`, corresponding to Figure 4 and Figure 5 in the paper.

### 4.2 synthetic-single

The `run_synthetic_lc.sh` script runs a single synthetic LC app (`shinjuku`), iterating over different target throughputs, with the following parameters:

- Workers: 20
- Dispatcher: 1
- Load distribution: bimodal, 99.5% 4us, 0.5% 10000us
- Preemption quantum: 30 μs

```sh
$ cd skyloft/scripts
$ ./build synthetic-sq
$ ./run_synthetic_lc.sh
```

The experiment data is saved in a file named `data-lc` in CSV format. Each column corresponds to the following data:

| Target Throughput | Measured Throughput | Min. Latency | 50% | 99% | 99.5% | 99.9% | Max. Latency |
| ----------------- | ------------------- | ------------ | --- | --- | ----- | ----- | ------------ |

To plot the output, move and rename the `data-lc` file to `skyloft/results/synthetic/99.5-4-0.5-10000/shinjuku-30us`, and run `plot_synthetic.py` (this script plots both the `synthetic-single` and `synthetic-multiple` figures):

```sh
$ mv data-lc ../results/synthetic/99.5-4-0.5-10000/skyloft-30us
$ cd plots
$ python3 plot_synthetic.py
```

The figures are saved in `synthetic-a.pdf` (Figure 6(a)), `synthetic-b.pdf` (Figure 6(b)), and `synthetic-c.pdf` (Figure 6(c)).

### 4.3 synthetic-multiple

The `run_synthetic_lcbe.sh` script runs both an LC app and a BE app (`antagonist`), with the same parameters as the previous experiment. `Skyloft` is built with the `sq_lcbe` scheduling policy.

```sh
$ cd skyloft/scripts
$ ./build synthetic-sq_lcbe
$ ./run_synthetic_lcbe.sh
```

After running the script, the terminal output might be garbled and may need a `reset` command to recover.

The experiment data of the LC app is saved in the `data-lc` file, in the same format as above. The data of the BE app is in the `data-be` file, with columns defined as follows:

| Measured Throughput of the LC App. | CPU Share of the BE App. |
| ---------------------------------- | ------------------------ |

The plotting procedure is similar:

```sh
$ mv data-lc ../results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-30us-lc
$ mv data-be ../results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-30us-be
$ cd plots
$ python3 plot_synthetic.py
```

The figures are saved in `synthetic-a.pdf` (Figure 6(a)), `synthetic-b.pdf` (Figure 6(b)), and `synthetic-c.pdf` (Figure 6(c)).
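Both synthetic experiments draw per-request service times from the bimodal distribution listed in Section 4.2: 99.5% of requests run for 4 us and 0.5% for 10000 us. The sketch below is only meant to illustrate that distribution; the file name and structure are made up, and it is not the artifact's load generator:

```c
/* bimodal.c - toy generator for the bimodal service-time distribution used by
 * the synthetic experiments: 99.5% of requests take 4 us, 0.5% take 10000 us.
 * Illustrative only; not the artifact's code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Returns a service time in microseconds. */
static uint64_t bimodal_service_time_us(void)
{
    double u = rand() / (double)RAND_MAX; /* uniform in [0, 1] */
    return u < 0.995 ? 4 : 10000;
}

int main(void)
{
    const int n = 1000000;
    uint64_t total = 0, longs = 0;

    srand(42);
    for (int i = 0; i < n; i++) {
        uint64_t us = bimodal_service_time_us();
        total += us;
        if (us == 10000)
            longs++;
    }
    /* Expected mean: 0.995 * 4 + 0.005 * 10000 ~= 54 us */
    printf("mean service time: %.2f us, long requests: %.3f%%\n",
           (double)total / n, 100.0 * longs / n);
    return 0;
}
```

Note that although only 0.5% of requests are long, they account for roughly 93% of the total work (0.005 × 10000 us versus 0.995 × 4 us), which is why a small preemption quantum (30 μs here) matters for keeping the short requests' tail latency low.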
### 4.4 memcached

Build and run the Skyloft memcached on the Server:

```sh
$ cd skyloft/scripts
$ ./build memcached
$ ./run.sh memcached -p 11211 -t 4 -u root
```

Run the Shenango client on the Client:

```sh
$ sudo ./iokerneld
$ numactl -N 0 -- \
    ./apps/synthetic/target/release/synthetic \
    10.3.3.3:11211 \
    --config client.config \
    --threads 32 \
    --mode runtime-client \
    --protocol memcached \
    --samples 30 --start_mpps 0 --mpps 3.0 \
    --transport tcp \
    --runtime 1000000000
```

### 4.6 preempt

First, build the benchmarks for the various preemption mechanisms:

```sh
cd microbench
make
```

The entries in Table 5 can be obtained by running the following commands:

| Entries in Table 5        | Command              |
| ------------------------- | -------------------- |
| Signal Send/Recv          | `./signal_send_recv` |
| Signal Delivery           | `./signal_delivery`  |
| User IPI Send/Recv        | `./uipi_send_recv`   |
| User IPI Delivery         | `./uipi_delivery`    |
| `setitimer` Recv          | `./setitimer_recv`   |
| User timer interrupt Recv | `./utimer_recv`      |
| Kernel IPI Send/Recv      | `./kipi_send_recv`   |

For the Kernel IPI benchmark, you need to get the output of the kernel module via `dmesg`:

```console
$ ./kipi_send_recv
run on CPU: 2
sender run on CPU: 3
work time = 1999826054 cycles
kipi recv total latency = 4370643528 cycles
$ sudo dmesg
...
[42868.588566] skyloft: skyloft_ipi_bench: total=4370647834, avg=437 (cycles), ipi_recv_cnt = 2761085
...
```

The send time is `437` cycles, and the receive time is `4370647834 / 2761085 = 1582` cycles.

## 5. Related Work

### 5.1 ghOSt

First, install the [kernel](https://github.com/google/ghost-kernel). On Ubuntu 18.04, install the generic version 5.11; then, in the `ghost-kernel` directory, run `make oldconfig` and install.

The [`ghost-userspace`](https://github.com/yhtzd/ghost-userspace) project uses Bazel as the build tool. When attempting to build, the following error occurs:

```log
ERROR: external/subpar/compiler/BUILD:31:10: in py_binary rule @@subpar//compiler:compiler:
Traceback (most recent call last):
  File "/virtual_builtins_bzl/common/python/py_binary_bazel.bzl", line 38, column 36, in _py_binary_impl
  File "/virtual_builtins_bzl/common/python/py_executable_bazel.bzl", line 97, column 37, in py_executable_bazel_impl
  File "/virtual_builtins_bzl/common/python/py_executable.bzl", line 108, column 25, in py_executable_base_impl
  File "/virtual_builtins_bzl/common/python/py_executable.bzl", line 189, column 13, in _validate_executable
Error in fail: It is not allowed to use Python 2
```

Directly modifying the `BUILD` file to change the default from PY2 to PY3 resolves the error.

To install `gcc-9`, execute the following commands, as the default `gcc` version is too old to support C++20:

```sh
sudo add-apt-repository ppa:ubuntu-toolchain-r/test
sudo apt update
sudo apt install gcc-9 g++-9
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 60 --slave /usr/bin/g++ g++ /usr/bin/g++-9
```

To install `clang-12`, use the following commands:

```sh
wget https://apt.llvm.org/llvm.sh
chmod +x llvm.sh
sudo ./llvm.sh 12
```

During `sudo apt update`, an issue occurs where an AMD64 machine attempts to pull ARM64 images. Specify the architecture `[arch=amd64,i386]` in the `sources.list`.

An error `error: use of undeclared identifier 'BPF_F_MMAPABLE'` occurs. To resolve this, in the `ghost-kernel` directory, run `sudo make headers_install INSTALL_HDR_PATH=/usr` to overwrite the existing Linux headers.
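As a closing note on the `preempt` experiment (Section 4.6): the Table 5 entries are average cycle counts, obtained by timing a large batch of operations with the TSC and dividing by the operation count, exactly as in the `kipi_send_recv` arithmetic above. The sketch below illustrates only that timing pattern; the dummy operation and file name are made up, and this is not the artifact's microbenchmark code:

```c
/* cycles.c - rough sketch of TSC-based per-operation timing, as used for
 * Table 5 style numbers: time a batch of operations, divide by the count.
 * Illustrative only; not the artifact's microbenchmark code.
 * Build (assumption): gcc -O2 cycles.c -o cycles
 */
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h> /* __rdtsc() */

#define N 1000000

/* Stand-in for the operation being measured (e.g. sending a signal or a
 * user IPI in the real benchmarks); here it is just a dummy store. */
static volatile uint64_t sink;
static inline void op(void)
{
    sink++;
}

int main(void)
{
    uint64_t start = __rdtsc();
    for (uint64_t i = 0; i < N; i++)
        op();
    uint64_t total = __rdtsc() - start;

    /* Same arithmetic as the kipi example in Section 4.6:
     * total cycles / number of operations = cycles per operation. */
    printf("total = %llu cycles, avg = %llu cycles/op\n",
           (unsigned long long)total, (unsigned long long)(total / N));
    return 0;
}
```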
+ diff --git a/include/net/arp.h b/include/net/arp.h new file mode 100644 index 0000000..da5fda4 --- /dev/null +++ b/include/net/arp.h @@ -0,0 +1,46 @@ +/* + * arp.h - Address Resolution Protocol (RFC 826, RFC 903) + */ + +#pragma once + +#include +#include +#include + +struct arp_hdr { + uint16_t htype; + uint16_t ptype; /* the ETHERTYPE */ + uint8_t hlen; + uint8_t plen; + uint16_t op; + + /* + * Variable length fields continue as follows: + * sender hw addr: hlen bytes + * sender protocol addr: plen bytes + * target hw addr: hlen bytes + * target protocol addr: plen bytes + */ +} __packed; + +struct arp_hdr_ethip { + struct eth_addr sender_mac; + uint32_t sender_ip; + struct eth_addr target_mac; + uint32_t target_ip; +} __packed; + +#define ARP_HTYPE_ETHER 1 /* ethernet */ +#define ARP_HTYPE_IEEE802 6 /* token-ring */ +#define ARP_HTYPE_ARCNET 7 /* arcnet */ +#define ARP_HTYPE_FRELAY 16 /* frame relay */ +#define ARP_HTYPE_IEEE1394 24 /* firewire */ +#define ARP_HTYPE_INFINIBAND 32 /* infiniband */ + +enum { + ARP_OP_REQUEST = 1, /* request hw addr given protocol addr */ + ARP_OP_REPLY = 2, /* response hw addr given protocol addr */ + ARP_OP_REVREQUEST = 3, /* request protocol addr given hw addr */ + ARP_OP_REVREPLY = 4, /* response protocol addr given hw addr */ +}; diff --git a/include/net/cksum.h b/include/net/cksum.h new file mode 100644 index 0000000..2dd0521 --- /dev/null +++ b/include/net/cksum.h @@ -0,0 +1,138 @@ +/* + * cksum.h - network checksum routines + */ + +#pragma once + +#include +#include + +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. + * Copyright(c) 2010-2014 Intel Corporation. + * Copyright(c) 2014 6WIND S.A. + * All rights reserved. + * + * These checksum routines were originally from DPDK. + */ + +/** + * @internal Calculate a sum of all words in the buffer. + * Helper routine for the rte_raw_cksum(). + * + * @param buf + * Pointer to the buffer. + * @param len + * Length of the buffer. + * @param sum + * Initial value of the sum. + * @return + * sum += Sum of all words in the buffer. + */ +static inline uint32_t __raw_cksum(const void *buf, size_t len, uint32_t sum) +{ + /* workaround gcc strict-aliasing warning */ + uintptr_t ptr = (uintptr_t)buf; + typedef uint16_t __attribute__((__may_alias__)) u16_p; + const u16_p *u16 = (const u16_p *)ptr; + + while (len >= (sizeof(*u16) * 4)) { + sum += u16[0]; + sum += u16[1]; + sum += u16[2]; + sum += u16[3]; + len -= sizeof(*u16) * 4; + u16 += 4; + } + while (len >= sizeof(*u16)) { + sum += *u16; + len -= sizeof(*u16); + u16 += 1; + } + + /* if length is in odd bytes */ + if (len == 1) + sum += *((const uint8_t *)u16); + + return sum; +} + +/** + * @internal Reduce a sum to the non-complemented checksum. + * Helper routine for the rte_raw_cksum(). + * + * @param sum + * Value of the sum. + * @return + * The non-complemented checksum. + */ +static inline uint16_t __raw_cksum_reduce(uint32_t sum) +{ + sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff); + sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff); + return (uint16_t)sum; +} + +/** + * Process the non-complemented checksum of a buffer. + * + * @param buf + * Pointer to the buffer. + * @param len + * Length of the buffer. + * @return + * The non-complemented checksum. 
+ */ +static inline uint16_t raw_cksum(const void *buf, size_t len) +{ + uint32_t sum; + + sum = __raw_cksum(buf, len, 0); + return __raw_cksum_reduce(sum); +} + +/** + * Process the pseudo-header checksum of an IPv4 header. + * + * The checksum field must be set to 0 by the caller. + * + * @param ipv4_hdr + * The pointer to the contiguous IPv4 header. + * @return + * The non-complemented checksum to set in the L4 header. + */ +static inline uint16_t ipv4_phdr_cksum(uint8_t proto, uint32_t saddr, uint32_t daddr, + uint16_t l4len) +{ + struct ipv4_psd_header { + uint32_t saddr; /* IP address of source host. */ + uint32_t daddr; /* IP address of destination host. */ + uint8_t zero; /* zero. */ + uint8_t proto; /* L4 protocol type. */ + uint16_t len; /* L4 length. */ + } psd_hdr; + + psd_hdr.saddr = hton32(saddr); + psd_hdr.daddr = hton32(daddr); + psd_hdr.zero = 0; + psd_hdr.proto = proto; + psd_hdr.len = hton16(l4len); + return raw_cksum(&psd_hdr, sizeof(psd_hdr)); +} + +static inline uint16_t ipv4_udptcp_cksum(uint8_t proto, uint32_t saddr, uint32_t daddr, + uint16_t l4len, const void *l4hdr) +{ + uint32_t cksum; + + cksum = raw_cksum(l4hdr, l4len); + cksum += ipv4_phdr_cksum(proto, saddr, daddr, l4len); + cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff); + cksum = (~cksum) & 0xffff; + if (cksum == 0) + cksum = 0xffff; + + return (uint16_t)cksum; +} diff --git a/include/net/ethernet.h b/include/net/ethernet.h new file mode 100644 index 0000000..84da2f6 --- /dev/null +++ b/include/net/ethernet.h @@ -0,0 +1,339 @@ +/* + * ethernet.h - protocol definitions for ethernet frames + * + * Based on Freebsd's sys/net/ethernet.h. + */ + +#pragma once + +#include +#include + +#define ETH_ADDR_LEN 6 +#define ETH_TYPE_LEN 2 +#define ETH_CRC_LEN 4 +#define ETH_HDR_LEN (ETH_ADDR_LEN * 2 + ETH_TYPE_LEN) +#define ETH_MIN_LEN 64 +#define ETH_MAX_LEN 1518 +#define ETH_MAX_LEN_JUMBO 9018 /* max jumbo frame len, including CRC */ +#define ETH_MTU 1500 + +struct eth_addr { + uint8_t addr[ETH_ADDR_LEN]; +} __packed; + +#define ETH_ADDR_LOCAL_ADMIN 0x02 /* locally assigned */ +#define ETH_ADDR_GROUP 0x01 /* multicast or broadcast */ +#define ETH_ADDR_BROADCAST \ + { \ + .addr = {0xFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF}, \ + } +static const struct eth_addr eth_addr_broadcast = ETH_ADDR_BROADCAST; + +static inline uint64_t eth_addr_to_uint64(struct eth_addr *addr) +{ + uint64_t val = 0; + int i; + + for (i = 0; i < ETH_ADDR_LEN; i++) val |= (addr->addr[i] << (i * 8)); + + return val; +} + +static inline void uint64_to_eth_addr(uint64_t val, struct eth_addr *addr) +{ + int i; + + for (i = 0; i < ETH_ADDR_LEN; i++) addr->addr[i] = ((val >> (i * 8)) & 0xff); +} + +static inline bool eth_addr_is_multicast(struct eth_addr *addr) +{ + return (addr->addr[0] & ETH_ADDR_GROUP); +} + +static inline bool eth_addr_is_zero(struct eth_addr *addr) +{ + int i; + + for (i = 0; i < ETH_ADDR_LEN; i++) { + if (addr->addr[i] != 0) + return false; + } + + return true; +} + +struct eth_hdr { + struct eth_addr dhost; + struct eth_addr shost; + uint16_t type; +} __packed; + +/* + * NOTE: 0x0000-0x05DC (0..1500) are generally IEEE 802.3 length fields. + * However, there are some conflicts. + */ + +#define ETHTYPE_8023 0x0004 /* IEEE 802.3 packet */ + /* 0x0101 .. 0x1FF Experimental */ +#define ETHTYPE_PUP 0x0200 /* Xerox PUP protocol - see 0A00 */ +#define ETHTYPE_PUPAT 0x0200 /* PUP Address Translation - see 0A01 */ +#define ETHTYPE_SPRITE 0x0500 /* ??? 
*/ + /* 0x0400 Nixdorf */ +#define ETHTYPE_NS 0x0600 /* XNS */ +#define ETHTYPE_NSAT 0x0601 /* XNS Address Translation (3Mb only) */ +#define ETHTYPE_DLOG1 0x0660 /* DLOG (?) */ +#define ETHTYPE_DLOG2 0x0661 /* DLOG (?) */ +#define ETHTYPE_IP 0x0800 /* IP protocol */ +#define ETHTYPE_X75 0x0801 /* X.75 Internet */ +#define ETHTYPE_NBS 0x0802 /* NBS Internet */ +#define ETHTYPE_ECMA 0x0803 /* ECMA Internet */ +#define ETHTYPE_CHAOS 0x0804 /* CHAOSnet */ +#define ETHTYPE_X25 0x0805 /* X.25 Level 3 */ +#define ETHTYPE_ARP 0x0806 /* Address resolution protocol */ +#define ETHTYPE_NSCOMPAT 0x0807 /* XNS Compatibility */ +#define ETHTYPE_FRARP 0x0808 /* Frame Relay ARP (RFC1701) */ + /* 0x081C Symbolics Private */ + /* 0x0888 - 0x088A Xyplex */ +#define ETHTYPE_UBDEBUG 0x0900 /* Ungermann-Bass network debugger */ +#define ETHTYPE_IEEEPUP 0x0A00 /* Xerox IEEE802.3 PUP */ +#define ETHTYPE_IEEEPUPAT 0x0A01 /* Xerox IEEE802.3 PUP Address Translation */ +#define ETHTYPE_VINES 0x0BAD /* Banyan VINES */ +#define ETHTYPE_VINESLOOP 0x0BAE /* Banyan VINES Loopback */ +#define ETHTYPE_VINESECHO 0x0BAF /* Banyan VINES Echo */ + +/* 0x1000 - 0x100F Berkeley Trailer */ +/* + * The ETHTYPE_NTRAILER packet types starting at ETHTYPE_TRAIL have + * (type-ETHTYPE_TRAIL)*512 bytes of data followed + * by an ETH type (as given above) and then the (variable-length) header. + */ +#define ETHTYPE_TRAIL 0x1000 /* Trailer packet */ +#define ETHTYPE_NTRAILER 16 + +#define ETHTYPE_DCA 0x1234 /* DCA - Multicast */ +#define ETHTYPE_VALID 0x1600 /* VALID system protocol */ +#define ETHTYPE_DOGFIGHT 0x1989 /* Artificial Horizons ("Aviator" dogfight simulator [on Sun]) */ +#define ETHTYPE_RCL 0x1995 /* Datapoint Corporation (RCL lan protocol) */ + +/* The following 3C0x types + are unregistered: */ +#define ETHTYPE_NBPVCD \ + 0x3C00 /* 3Com NBP virtual circuit datagram (like XNS SPP) not registered \ + */ +#define ETHTYPE_NBPSCD 0x3C01 /* 3Com NBP System control datagram not registered */ +#define ETHTYPE_NBPCREQ 0x3C02 /* 3Com NBP Connect request (virtual cct) not registered */ +#define ETHTYPE_NBPCRSP 0x3C03 /* 3Com NBP Connect response not registered */ +#define ETHTYPE_NBPCC 0x3C04 /* 3Com NBP Connect complete not registered */ +#define ETHTYPE_NBPCLREQ 0x3C05 /* 3Com NBP Close request (virtual cct) not registered */ +#define ETHTYPE_NBPCLRSP 0x3C06 /* 3Com NBP Close response not registered */ +#define ETHTYPE_NBPDG 0x3C07 /* 3Com NBP Datagram (like XNS IDP) not registered */ +#define ETHTYPE_NBPDGB 0x3C08 /* 3Com NBP Datagram broadcast not registered */ +#define ETHTYPE_NBPCLAIM 0x3C09 /* 3Com NBP Claim NetBIOS name not registered */ +#define ETHTYPE_NBPDLTE 0x3C0A /* 3Com NBP Delete NetBIOS name not registered */ +#define ETHTYPE_NBPRAS 0x3C0B /* 3Com NBP Remote adaptor status request not registered */ +#define ETHTYPE_NBPRAR 0x3C0C /* 3Com NBP Remote adaptor response not registered */ +#define ETHTYPE_NBPRST 0x3C0D /* 3Com NBP Reset not registered */ + +#define ETHTYPE_PCS 0x4242 /* PCS Basic Block Protocol */ +#define ETHTYPE_IMLBLDIAG 0x424C /* Information Modes Little Big LAN diagnostic */ +#define ETHTYPE_DIDDLE 0x4321 /* THD - Diddle */ +#define ETHTYPE_IMLBL 0x4C42 /* Information Modes Little Big LAN */ +#define ETHTYPE_SIMNET 0x5208 /* BBN Simnet Private */ +#define ETHTYPE_DECEXPER 0x6000 /* DEC Unassigned, experimental */ +#define ETHTYPE_MOPDL 0x6001 /* DEC MOP dump/load */ +#define ETHTYPE_MOPRC 0x6002 /* DEC MOP remote console */ +#define ETHTYPE_DECnet 0x6003 /* DEC DECNET Phase IV route */ +#define 
ETHTYPE_DN ETHTYPE_DECnet /* libpcap, tcpdump */ +#define ETHTYPE_LAT 0x6004 /* DEC LAT */ +#define ETHTYPE_DECDIAG 0x6005 /* DEC diagnostic protocol (at interface initialization?) */ +#define ETHTYPE_DECCUST 0x6006 /* DEC customer protocol */ +#define ETHTYPE_SCA 0x6007 /* DEC LAVC, SCA */ +#define ETHTYPE_AMBER 0x6008 /* DEC AMBER */ +#define ETHTYPE_DECMUMPS 0x6009 /* DEC MUMPS */ + /* 0x6010 - 0x6014 3Com Corporation */ +#define ETHTYPE_TRANSETHER 0x6558 /* Trans Ether Bridging (RFC1701)*/ +#define ETHTYPE_RAWFR 0x6559 /* Raw Frame Relay (RFC1701) */ +#define ETHTYPE_UBDL 0x7000 /* Ungermann-Bass download */ +#define ETHTYPE_UBNIU 0x7001 /* Ungermann-Bass NIUs */ +#define ETHTYPE_UBDIAGLOOP 0x7002 /* Ungermann-Bass diagnostic/loopback */ +#define ETHTYPE_UBNMC 0x7003 /* Ungermann-Bass ??? (NMC to/from UB Bridge) */ +#define ETHTYPE_UBBST 0x7005 /* Ungermann-Bass Bridge Spanning Tree */ +#define ETHTYPE_OS9 0x7007 /* OS/9 Microware */ +#define ETHTYPE_OS9NET 0x7009 /* OS/9 Net? */ + /* 0x7020 - 0x7029 LRT (England) (now Sintrom) */ +#define ETHTYPE_RACAL 0x7030 /* Racal-Interlan */ +#define ETHTYPE_PRIMENTS 0x7031 /* Prime NTS (Network Terminal Service) */ +#define ETHTYPE_CABLETRON 0x7034 /* Cabletron */ +#define ETHTYPE_CRONUSVLN 0x8003 /* Cronus VLN */ +#define ETHTYPE_CRONUS 0x8004 /* Cronus Direct */ +#define ETHTYPE_HP 0x8005 /* HP Probe */ +#define ETHTYPE_NESTAR 0x8006 /* Nestar */ +#define ETHTYPE_ATTSTANFORD 0x8008 /* AT&T/Stanford (local use) */ +#define ETHTYPE_EXCELAN 0x8010 /* Excelan */ +#define ETHTYPE_SG_DIAG 0x8013 /* SGI diagnostic type */ +#define ETHTYPE_SG_NETGAMES 0x8014 /* SGI network games */ +#define ETHTYPE_SG_RESV 0x8015 /* SGI reserved type */ +#define ETHTYPE_SG_BOUNCE 0x8016 /* SGI bounce server */ +#define ETHTYPE_APOLLODOMAIN 0x8019 /* Apollo DOMAIN */ +#define ETHTYPE_TYMSHARE 0x802E /* Tymeshare */ +#define ETHTYPE_TIGAN 0x802F /* Tigan, Inc. */ +#define ETHTYPE_REVARP 0x8035 /* Reverse addr resolution protocol */ +#define ETHTYPE_AEONIC 0x8036 /* Aeonic Systems */ +#define ETHTYPE_IPXNEW 0x8037 /* IPX (Novell Netware?) */ +#define ETHTYPE_LANBRIDGE 0x8038 /* DEC LANBridge */ +#define ETHTYPE_DSMD 0x8039 /* DEC DSM/DDP */ +#define ETHTYPE_ARGONAUT 0x803A /* DEC Argonaut Console */ +#define ETHTYPE_VAXELN 0x803B /* DEC VAXELN */ +#define ETHTYPE_DECDNS 0x803C /* DEC DNS Naming Service */ +#define ETHTYPE_ENCRYPT 0x803D /* DEC Ethernet Encryption */ +#define ETHTYPE_DECDTS 0x803E /* DEC Distributed Time Service */ +#define ETHTYPE_DECLTM 0x803F /* DEC LAN Traffic Monitor */ +#define ETHTYPE_DECNETBIOS 0x8040 /* DEC PATHWORKS DECnet NETBIOS Emulation */ +#define ETHTYPE_DECLAST 0x8041 /* DEC Local Area System Transport */ + /* 0x8042 DEC Unassigned */ +#define ETHTYPE_PLANNING 0x8044 /* Planning Research Corp. */ + /* 0x8046 - 0x8047 AT&T */ +#define ETHTYPE_DECAM \ + 0x8048 /* DEC Availability Manager for Distributed Systems DECamds (but someone at DEC says \ + not) */ +#define ETHTYPE_EXPERDATA 0x8049 /* ExperData */ +#define ETHTYPE_VEXP 0x805B /* Stanford V Kernel exp. */ +#define ETHTYPE_VPROD 0x805C /* Stanford V Kernel prod. */ +#define ETHTYPE_ES 0x805D /* Evans & Sutherland */ +#define ETHTYPE_LITTLE 0x8060 /* Little Machines */ +#define ETHTYPE_COUNTERPOINT 0x8062 /* Counterpoint Computers */ + /* 0x8065 - 0x8066 Univ. of Mass @ Amherst */ +#define ETHTYPE_VEECO 0x8067 /* Veeco Integrated Auto. 
*/ +#define ETHTYPE_GENDYN 0x8068 /* General Dynamics */ +#define ETHTYPE_ATT 0x8069 /* AT&T */ +#define ETHTYPE_AUTOPHON 0x806A /* Autophon */ +#define ETHTYPE_COMDESIGN 0x806C /* ComDesign */ +#define ETHTYPE_COMPUGRAPHIC 0x806D /* Compugraphic Corporation */ + /* 0x806E - 0x8077 Landmark Graphics Corp. */ +#define ETHTYPE_MATRA 0x807A /* Matra */ +#define ETHTYPE_DDE 0x807B /* Dansk Data Elektronik */ +#define ETHTYPE_MERIT 0x807C /* Merit Internodal (or Univ of Michigan?) */ + /* 0x807D - 0x807F Vitalink Communications */ +#define ETHTYPE_VLTLMAN 0x8080 /* Vitalink TransLAN III Management */ + /* 0x8081 - 0x8083 Counterpoint Computers */ + /* 0x8088 - 0x808A Xyplex */ +#define ETHTYPE_ATALK 0x809B /* AppleTalk */ +#define ETHTYPE_AT ETHTYPE_ATALK /* old NetBSD */ +#define ETHTYPE_APPLETALK ETHTYPE_ATALK /* HP-UX */ + /* 0x809C - 0x809E Datability */ +#define ETHTYPE_SPIDER 0x809F /* Spider Systems Ltd. */ + /* 0x80A3 Nixdorf */ + /* 0x80A4 - 0x80B3 Siemens Gammasonics Inc. */ +/* 0x80C0 - 0x80C3 DCA (Digital Comm. Assoc.) Data Exchange Cluster */ +/* 0x80C4 - 0x80C5 Banyan Systems */ +#define ETHTYPE_PACER 0x80C6 /* Pacer Software */ +#define ETHTYPE_APPLITEK 0x80C7 /* Applitek Corporation */ + /* 0x80C8 - 0x80CC Intergraph Corporation */ + /* 0x80CD - 0x80CE Harris Corporation */ + /* 0x80CF - 0x80D2 Taylor Instrument */ + /* 0x80D3 - 0x80D4 Rosemount Corporation */ +#define ETHTYPE_SNA 0x80D5 /* IBM SNA Services over Ethernet */ +#define ETHTYPE_VARIAN 0x80DD /* Varian Associates */ +/* 0x80DE - 0x80DF TRFS (Integrated Solutions Transparent Remote File System) */ +/* 0x80E0 - 0x80E3 Allen-Bradley */ +/* 0x80E4 - 0x80F0 Datability */ +#define ETHTYPE_RETIX 0x80F2 /* Retix */ +#define ETHTYPE_AARP 0x80F3 /* AppleTalk AARP */ + /* 0x80F4 - 0x80F5 Kinetics */ +#define ETHTYPE_APOLLO 0x80F7 /* Apollo Computer */ +#define ETHTYPE_VLAN 0x8100 /* IEEE 802.1Q VLAN tagging (XXX conflicts) */ + /* 0x80FF - 0x8101 Wellfleet Communications (XXX conflicts) */ +#define ETHTYPE_BOFL 0x8102 /* Wellfleet; BOFL (Breath OF Life) pkts [every 5-10 secs.] */ +#define ETHTYPE_WELLFLEET 0x8103 /* Wellfleet Communications */ + /* 0x8107 - 0x8109 Symbolics Private */ +#define ETHTYPE_TALARIS 0x812B /* Talaris */ +#define ETHTYPE_WATERLOO 0x8130 /* Waterloo Microsystems Inc. (XXX which?) */ +#define ETHTYPE_HAYES 0x8130 /* Hayes Microcomputers (XXX which?) */ +#define ETHTYPE_VGLAB 0x8131 /* VG Laboratory Systems */ + /* 0x8132 - 0x8137 Bridge Communications */ +#define ETHTYPE_IPX 0x8137 /* Novell (old) NetWare IPX (ECONFIG E option) */ +#define ETHTYPE_NOVELL 0x8138 /* Novell, Inc. 
*/ + /* 0x8139 - 0x813D KTI */ +#define ETHTYPE_MUMPS 0x813F /* M/MUMPS data sharing */ +#define ETHTYPE_AMOEBA 0x8145 /* Vrije Universiteit (NL) Amoeba 4 RPC (obsolete) */ +#define ETHTYPE_FLIP 0x8146 /* Vrije Universiteit (NL) FLIP (Fast Local Internet Protocol) */ +#define ETHTYPE_VURESERVED 0x8147 /* Vrije Universiteit (NL) [reserved] */ +#define ETHTYPE_LOGICRAFT 0x8148 /* Logicraft */ +#define ETHTYPE_NCD 0x8149 /* Network Computing Devices */ +#define ETHTYPE_ALPHA 0x814A /* Alpha Micro */ +#define ETHTYPE_SNMP 0x814C /* SNMP over Ethernet (see RFC1089) */ + /* 0x814D - 0x814E BIIN */ +#define ETHTYPE_TEC 0x814F /* Technically Elite Concepts */ +#define ETHTYPE_RATIONAL 0x8150 /* Rational Corp */ + /* 0x8151 - 0x8153 Qualcomm */ + /* 0x815C - 0x815E Computer Protocol Pty Ltd */ + /* 0x8164 - 0x8166 Charles River Data Systems */ +#define ETHTYPE_XTP 0x817D /* Protocol Engines XTP */ +#define ETHTYPE_SGITW 0x817E /* SGI/Time Warner prop. */ +#define ETHTYPE_HIPPI_FP 0x8180 /* HIPPI-FP encapsulation */ +#define ETHTYPE_STP 0x8181 /* Scheduled Transfer STP, HIPPI-ST */ + /* 0x8182 - 0x8183 Reserved for HIPPI-6400 */ + /* 0x8184 - 0x818C SGI prop. */ +#define ETHTYPE_MOTOROLA 0x818D /* Motorola */ +#define ETHTYPE_NETBEUI 0x8191 /* PowerLAN NetBIOS/NetBEUI (PC) */ + /* 0x819A - 0x81A3 RAD Network Devices */ + /* 0x81B7 - 0x81B9 Xyplex */ + /* 0x81CC - 0x81D5 Apricot Computers */ + /* 0x81D6 - 0x81DD Artisoft Lantastic */ + /* 0x81E6 - 0x81EF Polygon */ + /* 0x81F0 - 0x81F2 Comsat Labs */ + /* 0x81F3 - 0x81F5 SAIC */ + /* 0x81F6 - 0x81F8 VG Analytical */ + /* 0x8203 - 0x8205 QNX Software Systems Ltd. */ + /* 0x8221 - 0x8222 Ascom Banking Systems */ + /* 0x823E - 0x8240 Advanced Encryption Systems */ + /* 0x8263 - 0x826A Charles River Data Systems */ + /* 0x827F - 0x8282 Athena Programming */ + /* 0x829A - 0x829B Inst Ind Info Tech */ + /* 0x829C - 0x82AB Taurus Controls */ + /* 0x82AC - 0x8693 Walker Richer & Quinn */ +#define ETHTYPE_ACCTON 0x8390 /* Accton Technologies (unregistered) */ +#define ETHTYPE_TALARISMC 0x852B /* Talaris multicast */ +#define ETHTYPE_KALPANA 0x8582 /* Kalpana */ + /* 0x8694 - 0x869D Idea Courier */ + /* 0x869E - 0x86A1 Computer Network Tech */ + /* 0x86A3 - 0x86AC Gateway Communications */ +#define ETHTYPE_SECTRA 0x86DB /* SECTRA */ +#define ETHTYPE_IPV6 0x86DD /* IP protocol version 6 */ +#define ETHTYPE_DELTACON 0x86DE /* Delta Controls */ +#define ETHTYPE_ATOMIC 0x86DF /* ATOMIC */ + /* 0x86E0 - 0x86EF Landis & Gyr Powers */ + /* 0x8700 - 0x8710 Motorola */ +#define ETHTYPE_RDP 0x8739 /* Control Technology Inc. RDP Without IP */ +#define ETHTYPE_MICP 0x873A /* Control Technology Inc. Mcast Industrial Ctrl Proto. */ + /* 0x873B - 0x873C Control Technology Inc. 
Proprietary */ +#define ETHTYPE_TCPCOMP 0x876B /* TCP/IP Compression (RFC1701) */ +#define ETHTYPE_IPAS 0x876C /* IP Autonomous Systems (RFC1701) */ +#define ETHTYPE_SECUREDATA 0x876D /* Secure Data (RFC1701) */ +#define ETHTYPE_FLOWCONTROL 0x8808 /* 802.3x flow control packet */ +#define ETHTYPE_SLOW 0x8809 /* 802.3ad link aggregation (LACP) */ +#define ETHTYPE_PPP 0x880B /* PPP (obsolete by PPPoE) */ +#define ETHTYPE_HITACHI 0x8820 /* Hitachi Cable (Optoelectronic Systems Laboratory) */ +#define ETHTYPE_MPLS 0x8847 /* MPLS Unicast */ +#define ETHTYPE_MPLS_MCAST 0x8848 /* MPLS Multicast */ +#define ETHTYPE_AXIS 0x8856 /* Axis Communications AB proprietary bootstrap/config */ +#define ETHTYPE_PPPOEDISC 0x8863 /* PPP Over Ethernet Discovery Stage */ +#define ETHTYPE_PPPOE 0x8864 /* PPP Over Ethernet Session Stage */ +#define ETHTYPE_LANPROBE 0x8888 /* HP LanProbe test? */ +#define ETHTYPE_PAE 0x888e /* EAPOL PAE/802.1x */ +#define ETHTYPE_8021AB 0x88cc /* Link Layer Discovery Protocol (IEEE 802.1AB) */ +#define ETHTYPE_LOOPBACK 0x9000 /* Loopback: used to test interfaces */ +#define ETHTYPE_LBACK ETHTYPE_LOOPBACK /* DEC MOP loopback */ +#define ETHTYPE_XNSSM 0x9001 /* 3Com (Formerly Bridge Communications), XNS Systems Management */ +#define ETHTYPE_TCPSM \ + 0x9002 /* 3Com (Formerly Bridge Communications), TCP/IP Systems Management \ + */ +#define ETHTYPE_BCLOOP 0x9003 /* 3Com (Formerly Bridge Communications), loopback detection */ +#define ETHTYPE_DEBNI 0xAAAA /* DECNET? Used by VAX 6220 DEBNI */ +#define ETHTYPE_SONIX 0xFAF5 /* Sonix Arpeggio */ +#define ETHTYPE_VITAL 0xFF00 /* BBN VITAL-LanBridge cache wakeups */ + /* 0xFF00 - 0xFFOF ISC Bunker Ramo */ + +#define ETHTYPE_MAX 0xFFFF /* Maximum valid ethernet type, reserved */ diff --git a/include/net/icmp.h b/include/net/icmp.h new file mode 100644 index 0000000..05dcf67 --- /dev/null +++ b/include/net/icmp.h @@ -0,0 +1,203 @@ +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)ip_icmp.h 8.1 (Berkeley) 6/10/93 + * $FreeBSD$ + */ + +#pragma once + +#include +#include + +/* + * Interface Control Message Protocol Definitions. + * Per RFC 792, September 1981. + */ + +/* + * Internal of an ICMP Router Advertisement + */ +struct icmp_ra_addr { + uint32_t ira_addr; + uint32_t ira_preference; +} __packed; + +/* + * Structure of an icmp header. + */ +struct icmp_hdr { + uint8_t type; /* type of message, see below */ + uint8_t code; /* type sub code */ + uint16_t cksum; /* ones complement cksum of struct */ +} __packed; + +/* + * Structure of an icmp packet. + */ +struct icmp_pkt { + struct icmp_hdr hdr; + union { + uint8_t ih_pptr; /* ICMP_PARAMPROB */ + uint32_t ih_gwaddr; /* ICMP_REDIRECT */ + struct ih_idseq { + uint16_t icd_id; /* network format */ + uint16_t icd_seq; /* network format */ + } ih_idseq; + int32_t ih_void; + + /* ICMP_UNREACH_NEEDFRAG -- Path MTU Discovery (RFC1191) */ + struct ih_pmtu { + uint16_t ipm_void; /* network format */ + uint16_t ipm_nextmtu; /* network format */ + } ih_pmtu; + + struct ih_rtradv { + uint8_t irt_num_addrs; + uint8_t irt_wpa; + uint16_t irt_lifetime; + } ih_rtradv; + } icmp_hun; +#define icmp_pptr icmp_hun.ih_pptr +#define icmp_gwaddr icmp_hun.ih_gwaddr +#define icmp_id icmp_hun.ih_idseq.icd_id +#define icmp_seq icmp_hun.ih_idseq.icd_seq +#define icmp_void icmp_hun.ih_void +#define icmp_pmvoid icmp_hun.ih_pmtu.ipm_void +#define icmp_nextmtu icmp_hun.ih_pmtu.ipm_nextmtu +#define icmp_num_addrs icmp_hun.ih_rtradv.irt_num_addrs +#define icmp_wpa icmp_hun.ih_rtradv.irt_wpa +#define icmp_lifetime icmp_hun.ih_rtradv.irt_lifetime + union { + struct id_ts { /* ICMP Timestamp */ + /* + * The next 3 fields are in network format, + * milliseconds since 00:00 GMT + */ + uint32_t its_otime; /* Originate */ + uint32_t its_rtime; /* Receive */ + uint32_t its_ttime; /* Transmit */ + } id_ts; + struct id_ip { + uint32_t idi_ip; + /* options and then 64 bits of data */ + } id_ip; + struct icmp_ra_addr id_radv; + uint32_t id_mask; + char id_data[1]; + } icmp_dun; +#define icmp_otime icmp_dun.id_ts.its_otime +#define icmp_rtime icmp_dun.id_ts.its_rtime +#define icmp_ttime icmp_dun.id_ts.its_ttime +#define icmp_ip icmp_dun.id_ip.idi_ip +#define icmp_radv icmp_dun.id_radv +#define icmp_mask icmp_dun.id_mask +#define icmp_data icmp_dun.id_data +} __packed; + +/* + * Lower bounds on packet lengths for various types. + * For the error advice packets must first insure that the + * packet is large enough to contain the returned ip header. + * Only then can we do the check to see if 64 bits of packet + * data have been returned, since we need to check the returned + * ip header length. + */ +#define ICMP_MINLEN 8 /* abs minimum */ +#define ICMP_TSLEN (8 + 3 * sizeof(uint32_t)) /* timestamp */ +#define ICMP_MASKLEN 12 /* address mask */ +#define ICMP_ADVLENMIN (8 + sizeof(struct ip) + 8) /* min */ +#define ICMP_ADVLEN(p) (8 + ((p)->icmp_ip.ip_hl << 2) + 8) +/* N.B.: must separately check that ip_hl >= 5 */ + +/* + * Definition of type and code field values. 
+ */ +#define ICMP_ECHOREPLY 0 /* echo reply */ +#define ICMP_UNREACH 3 /* dest unreachable, codes: */ +#define ICMP_UNREACH_NET 0 /* bad net */ +#define ICMP_UNREACH_HOST 1 /* bad host */ +#define ICMP_UNREACH_PROTOCOL 2 /* bad protocol */ +#define ICMP_UNREACH_PORT 3 /* bad port */ +#define ICMP_UNREACH_NEEDFRAG 4 /* IP_DF caused drop */ +#define ICMP_UNREACH_SRCFAIL 5 /* src route failed */ +#define ICMP_UNREACH_NET_UNKNOWN 6 /* unknown net */ +#define ICMP_UNREACH_HOST_UNKNOWN 7 /* unknown host */ +#define ICMP_UNREACH_ISOLATED 8 /* src host isolated */ +#define ICMP_UNREACH_NET_PROHIB 9 /* prohibited access */ +#define ICMP_UNREACH_HOST_PROHIB 10 /* ditto */ +#define ICMP_UNREACH_TOSNET 11 /* bad tos for net */ +#define ICMP_UNREACH_TOSHOST 12 /* bad tos for host */ +#define ICMP_UNREACH_FILTER_PROHIB 13 /* admin prohib */ +#define ICMP_UNREACH_HOST_PRECEDENCE 14 /* host prec vio. */ +#define ICMP_UNREACH_PRECEDENCE_CUTOFF 15 /* prec cutoff */ +#define ICMP_SOURCEQUENCH 4 /* packet lost, slow down */ +#define ICMP_REDIRECT 5 /* shorter route, codes: */ +#define ICMP_REDIRECT_NET 0 /* for network */ +#define ICMP_REDIRECT_HOST 1 /* for host */ +#define ICMP_REDIRECT_TOSNET 2 /* for tos and net */ +#define ICMP_REDIRECT_TOSHOST 3 /* for tos and host */ +#define ICMP_ALTHOSTADDR 6 /* alternate host address */ +#define ICMP_ECHO 8 /* echo service */ +#define ICMP_ROUTERADVERT 9 /* router advertisement */ +#define ICMP_ROUTERADVERT_NORMAL 0 /* normal advertisement */ +#define ICMP_ROUTERADVERT_NOROUTE_COMMON 16 /* selective routing */ +#define ICMP_ROUTERSOLICIT 10 /* router solicitation */ +#define ICMP_TIMXCEED 11 /* time exceeded, code: */ +#define ICMP_TIMXCEED_INTRANS 0 /* ttl==0 in transit */ +#define ICMP_TIMXCEED_REASS 1 /* ttl==0 in reass */ +#define ICMP_PARAMPROB 12 /* ip header bad */ +#define ICMP_PARAMPROB_ERRATPTR 0 /* error at param ptr */ +#define ICMP_PARAMPROB_OPTABSENT 1 /* req. opt. 
absent */ +#define ICMP_PARAMPROB_LENGTH 2 /* bad length */ +#define ICMP_TSTAMP 13 /* timestamp request */ +#define ICMP_TSTAMPREPLY 14 /* timestamp reply */ +#define ICMP_IREQ 15 /* information request */ +#define ICMP_IREQREPLY 16 /* information reply */ +#define ICMP_MASKREQ 17 /* address mask request */ +#define ICMP_MASKREPLY 18 /* address mask reply */ +#define ICMP_TRACEROUTE 30 /* traceroute */ +#define ICMP_DATACONVERR 31 /* data conversion error */ +#define ICMP_MOBILE_REDIRECT 32 /* mobile host redirect */ +#define ICMP_IPV6_WHEREAREYOU 33 /* IPv6 where-are-you */ +#define ICMP_IPV6_IAMHERE 34 /* IPv6 i-am-here */ +#define ICMP_MOBILE_REGREQUEST 35 /* mobile registration req */ +#define ICMP_MOBILE_REGREPLY 36 /* mobile registration reply */ +#define ICMP_SKIP 39 /* SKIP */ +#define ICMP_PHOTURIS 40 /* Photuris */ +#define ICMP_PHOTURIS_UNKNOWN_INDEX 1 /* unknown sec index */ +#define ICMP_PHOTURIS_AUTH_FAILED 2 /* auth failed */ +#define ICMP_PHOTURIS_DECRYPT_FAILED 3 /* decrypt failed */ + +#define ICMP_MAXTYPE 40 + +#define ICMP_INFOTYPE(type) \ + ((type) == ICMP_ECHOREPLY || (type) == ICMP_ECHO || (type) == ICMP_ROUTERADVERT || \ + (type) == ICMP_ROUTERSOLICIT || (type) == ICMP_TSTAMP || (type) == ICMP_TSTAMPREPLY || \ + (type) == ICMP_IREQ || (type) == ICMP_IREQREPLY || (type) == ICMP_MASKREQ || \ + (type) == ICMP_MASKREPLY) diff --git a/include/net/ip.h b/include/net/ip.h new file mode 100644 index 0000000..6758e53 --- /dev/null +++ b/include/net/ip.h @@ -0,0 +1,74 @@ +#pragma once + +#include +#include + +#include +#include + +#define MAKE_IP_ADDR(a, b, c, d) \ + (((uint32_t)a << 24) | ((uint32_t)b << 16) | ((uint32_t)c << 8) | (uint32_t)d) + +#define IP_ADDR_STR_LEN 16 + +/* + * Structure of an internet header, naked of options. + */ +struct ip_hdr { +#if __BYTE_ORDER == __LITTLE_ENDIAN + uint8_t header_len : 4; /* header length */ + uint8_t version : 4; /* version */ +#endif +#if __BYTE_ORDER == __BIG_ENDIAN + uint8_t version : 4; /* version */ + uint8_t header_len : 4; /* header length */ +#endif + uint8_t tos; /* type of service */ + uint16_t len; /* total length */ + uint16_t id; /* identification */ + uint16_t off; /* fragment offset field */ +#define IP_RF 0x8000 /* reserved fragment flag */ +#define IP_DF 0x4000 /* dont fragment flag */ +#define IP_MF 0x2000 /* more fragments flag */ +#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */ + uint8_t ttl; /* time to live */ + uint8_t proto; /* protocol */ + uint16_t cksum; /* checksum */ + uint32_t saddr; /* source address */ + uint32_t daddr; /* dest address */ +} __packed __aligned(4); + +/* + * Definitions for DiffServ Codepoints as per RFC2474 + */ +#define IPTOS_DSCP_CS0 0x00 +#define IPTOS_DSCP_CS1 0x20 +#define IPTOS_DSCP_AF11 0x28 +#define IPTOS_DSCP_AF12 0x30 +#define IPTOS_DSCP_AF13 0x38 +#define IPTOS_DSCP_CS2 0x40 +#define IPTOS_DSCP_AF21 0x48 +#define IPTOS_DSCP_AF22 0x50 +#define IPTOS_DSCP_AF23 0x58 +#define IPTOS_DSCP_CS3 0x60 +#define IPTOS_DSCP_AF31 0x68 +#define IPTOS_DSCP_AF32 0x70 +#define IPTOS_DSCP_AF33 0x78 +#define IPTOS_DSCP_CS4 0x80 +#define IPTOS_DSCP_AF41 0x88 +#define IPTOS_DSCP_AF42 0x90 +#define IPTOS_DSCP_AF43 0x98 +#define IPTOS_DSCP_CS5 0xa0 +#define IPTOS_DSCP_EF 0xb8 +#define IPTOS_DSCP_CS6 0xc0 +#define IPTOS_DSCP_CS7 0xe0 + +/* + * ECN (Explicit Congestion Notification) codepoints in RFC3168 mapped to the + * lower 2 bits of the TOS field. 
+ */ +#define IPTOS_ECN_NOTECT 0x00 /* not-ECT */ +#define IPTOS_ECN_ECT1 0x01 /* ECN-capable transport (1) */ +#define IPTOS_ECN_ECT0 0x02 /* ECN-capable transport (0) */ +#define IPTOS_ECN_CE 0x03 /* congestion experienced */ +#define IPTOS_ECN_MASK 0x03 /* ECN field mask */ diff --git a/include/net/mbuf.h b/include/net/mbuf.h new file mode 100644 index 0000000..8126983 --- /dev/null +++ b/include/net/mbuf.h @@ -0,0 +1,355 @@ +/* + * mbuf.h - buffer management for network packets + * + * TODO: Maybe consider adding refcounts to mbuf's. Let's wait until this turns + * out to be necessary. + */ + +#pragma once + +#include + +#include +#include +#include +#include +#include + +#define MBUF_DEFAULT_LEN 2048 +#define MBUF_DEFAULT_HEADROOM 128 + +struct mbuf { + struct mbuf *next; /* the next mbuf in the mbufq */ + unsigned char *head; /* start of the buffer */ + unsigned char *data; /* current position within the buffer */ + unsigned int head_len; /* length of the entire buffer from @head */ + unsigned int len; /* length of the data */ + unsigned int csum_type; /* type of checksum */ + unsigned int csum; /* 16-bit one's complement */ + + union { + unsigned int txflags; /* TX offload flags */ + unsigned int rss_hash; /* RSS 5-tuple hash from HW */ + }; + + unsigned short network_off; /* the offset of the network header */ + unsigned short transport_off; /* the offset of the transport header */ + unsigned long release_data; /* data for the release method */ + void (*release)(struct mbuf *m); /* frees the mbuf */ + + /* TCP fields */ + struct list_node link; /* list node for RX and TX queues */ + uint64_t timestamp; /* the time the packet was last sent */ + uint32_t seg_seq; /* the first seg number */ + uint32_t seg_end; /* the last seg number (noninclusive) */ + uint8_t flags; /* which flags were set? */ + atomic_int ref; /* a reference count for the mbuf */ +}; + +static inline unsigned char *__mbuf_pull(struct mbuf *m, unsigned int len) +{ + unsigned char *tmp = m->data; + m->len -= len; + m->data += len; + return tmp; +} + +/** + * mbuf_pull - strips data from the beginning of the buffer + * @m: the packet + * @len: the length in bytes to strip + * + * Returns the previous start of the buffer. + */ +static inline unsigned char *mbuf_pull(struct mbuf *m, unsigned int len) +{ + BUG_ON(len > m->len); + return __mbuf_pull(m, len); +} + +/** + * mbuf_pull_or_null - strips data from the beginning of the buffer + * @m: the packet + * @len: the length in bytes to strip + * + * Returns the previous start of the buffer or NULL if the buffer is smaller + * than @len. + */ +static inline unsigned char *mbuf_pull_or_null(struct mbuf *m, unsigned int len) +{ + return m->len >= len ? __mbuf_pull(m, len) : NULL; +} + +/** + * mbuf_push - prepends data to the beginning of the buffer + * @m: the packet + * @len: the length in bytes to prepend + * + * Returns the new start of the buffer. + */ +static inline unsigned char *mbuf_push(struct mbuf *m, unsigned int len) +{ + m->data -= len; + BUG_ON(m->data < m->head); + m->len += len; + return m->data; +} + +/** + * mbuf_put - appends data to the end of the buffer + * @m: the packet + * @len: the length in bytes to append + * + * Returns the previous end of the buffer. 
+ */ +static inline unsigned char *mbuf_put(struct mbuf *m, unsigned int len) +{ + unsigned char *tmp = m->data + m->len; + m->len += len; + BUG_ON(m->len > m->head_len); + return tmp; +} + +/** + * mbuf_trim - strips data off the end of the buffer + * @m: the packet + * @len: the length in bytes to strip + * + * Returns a pointer to the start of the bytes that were stripped. + */ +static inline unsigned char *mbuf_trim(struct mbuf *m, unsigned int len) +{ + BUG_ON(len > m->len); + m->len -= len; + return m->data + m->len; +} + +/** + * mbuf_reset - forces an mbuf to reposition the data pointer to an offset + * @m: the packet + * @offset: the new data offset from @m->head + */ +static inline void mbuf_reset(struct mbuf *m, unsigned int offset) +{ + unsigned int total_len = m->data - m->head + m->len; + BUG_ON(offset > total_len); + m->data = m->head + offset; + m->len = total_len - offset; +} + +/** + * mbuf_headroom - returns the space available before the start of the buffer + * @m: the packet + */ +static inline unsigned int mbuf_headroom(struct mbuf *m) +{ + return m->data - m->head; +} + +/** + * mbuf_tailroom - returns the space available after the end of the buffer + * @m: the packet + */ +static inline unsigned int mbuf_tailroom(struct mbuf *m) +{ + return m->head + m->head_len - m->data - m->len; +} + +/** + * mbuf_data - returns the current data pointer + * @m: the packet + */ +static inline unsigned char *mbuf_data(struct mbuf *m) +{ + return m->data; +} + +/** + * mbuf_length - returns the current data length + * @m: the packet + */ +static inline unsigned int mbuf_length(struct mbuf *m) +{ + return m->len; +} + +/* + * These marcos automatically typecast and determine the size of header structs. + * In most situations you should use these instead of the raw ops above. 
+ */ +#define mbuf_pull_hdr(mbuf, hdr) (typeof(hdr) *)mbuf_pull(mbuf, sizeof(hdr)) + +#define mbuf_pull_hdr_or_null(mbuf, hdr) (typeof(hdr) *)mbuf_pull_or_null(mbuf, sizeof(hdr)) + +#define mbuf_push_hdr(mbuf, hdr) (typeof(hdr) *)mbuf_push(mbuf, sizeof(hdr)) + +#define mbuf_put_hdr(mbuf, hdr) (typeof(hdr) *)mbuf_put(mbuf, sizeof(hdr)) + +#define mbuf_trim_hdr(mbuf, hdr) (typeof(hdr) *)mbuf_trim(mbuf, sizeof(hdr)) + +/** + * mbuf_mark_network_offset - sets the network offset to the data pointer + * @m: the mbuf in which to set the network offset + */ +static inline void mbuf_mark_network_offset(struct mbuf *m) +{ + ptrdiff_t off = m->data - m->head; + assert(off <= USHRT_MAX); + m->network_off = off; +} + +/** + * mbuf_mark_transport_offset - sets the transport offset to the data pointer + * @m: the mbuf in which to set the transport offset + */ +static inline void mbuf_mark_transport_offset(struct mbuf *m) +{ + ptrdiff_t off = m->data - m->head; + assert(off <= USHRT_MAX); + m->transport_off = off; +} + +/** + * mbuf_network_offset - returns a pointer to the network header + * @m: the mbuf containing the network offset + */ +static inline unsigned char *mbuf_network_offset(struct mbuf *m) +{ + return m->head + m->network_off; +} + +/** + * mbuf_network_offset - returns a pointer to the transport header + * @m: the mbuf containing the transport offset + */ +static inline unsigned char *mbuf_transport_offset(struct mbuf *m) +{ + return m->head + m->transport_off; +} + +#define mbuf_network_hdr(mbuf, hdr) (typeof(hdr) *)mbuf_network_offset(mbuf) + +#define mbuf_transport_hdr(mbuf, hdr) (typeof(hdr) *)mbuf_transport_offset(mbuf) + +/** + * mbuf_init - initializes an mbuf + * @m: the packet to initialize + * @head: the start of the backing buffer + * @head_len: the length of backing buffer + * @reserve_len: the number of bytes to reserve at the start of @head + */ +static inline void mbuf_init(struct mbuf *m, unsigned char *head, unsigned int head_len, + unsigned int reserve_len) +{ + assert(reserve_len < head_len); + m->head = head; + m->head_len = head_len; + m->data = m->head + reserve_len; + m->len = 0; +} + +/** + * mbuf_free - frees an mbuf back to an allocator + * @m: the mbuf to free + */ +static inline void mbuf_free(struct mbuf *m) +{ + m->release(m); +} + +struct mbuf *mbuf_clone(struct mbuf *dst, struct mbuf *src); + +struct mbufq { + struct mbuf *head, *tail; +}; + +/** + * mbufq_push_tail - push an mbuf to the tail of the queue + * @q: the mbuf queue + * @m: the mbuf to push + */ +static inline void mbufq_push_tail(struct mbufq *q, struct mbuf *m) +{ + m->next = NULL; + if (!q->head) { + q->head = q->tail = m; + return; + } + q->tail->next = m; + q->tail = m; +} + +/** + * mbufq_pop_head - pop an mbuf from the head of the queue + * @q: the mbuf queue + * + * Returns an mbuf or NULL if the queue is empty. + */ +static inline struct mbuf *mbufq_pop_head(struct mbufq *q) +{ + struct mbuf *head = q->head; + if (!head) + return NULL; + q->head = head->next; + return head; +} + +/** + * mbufq_peak_head - reads the head of the queue without popping + * @q: the mbuf queue + * + * Returns an mbuf or NULL if the queue is empty. 
+ */ +static inline struct mbuf *mbufq_peak_head(struct mbufq *q) +{ + return q->head; +} + +/** + * mbufq_merge_to_tail - merges a queue to the end of another queue + * @dst: the destination queue (will contain all the mbufs) + * @src: the source queue (will become empty) + */ +static inline void mbufq_merge_to_tail(struct mbufq *dst, struct mbufq *src) +{ + if (!src->head) + return; + if (!dst->head) + dst->head = src->head; + else + dst->tail->next = src->head; + dst->tail = src->tail; + src->head = NULL; +} + +/** + * mbufq_empty - returns true if the queue is empty + */ +static inline bool mbufq_empty(struct mbufq *q) +{ + return q->head == NULL; +} + +/** + * mbufq_release - frees all the mbufs in the queue + * @q: the queue to release + */ +static inline void mbufq_release(struct mbufq *q) +{ + struct mbuf *m; + while (true) { + m = mbufq_pop_head(q); + if (!m) + break; + mbuf_free(m); + } +} + +/** + * mbufq_init - initializes a queue + * @q: the mbuf queue to initialize + */ +static inline void mbufq_init(struct mbufq *q) +{ + q->head = NULL; +} diff --git a/include/net/ping.h b/include/net/ping.h new file mode 100644 index 0000000..7e2a3ef --- /dev/null +++ b/include/net/ping.h @@ -0,0 +1,6 @@ +#pragma once + +#include + +#include + diff --git a/include/net/tcp.h b/include/net/tcp.h new file mode 100644 index 0000000..ec4c794 --- /dev/null +++ b/include/net/tcp.h @@ -0,0 +1,46 @@ +/* + * tcp.h - Transmission Control Protocol (TCP) definitions + * + * Based on Freebsd, BSD licensed. + */ + +#pragma once + +#include +#include + +typedef uint32_t tcp_seq; + +/* + * TCP header. + * Per RFC 793, September, 1981. + */ +struct tcp_hdr { + uint16_t sport; /* source port */ + uint16_t dport; /* destination port */ + tcp_seq seq; /* sequence number */ + tcp_seq ack; /* acknowledgement number */ +#if __BYTE_ORDER == __LITTLE_ENDIAN + uint8_t x2 : 4, /* (unused) */ + off : 4; /* data offset */ +#endif +#if __BYTE_ORDER == __BIG_ENDIAN + uint8_t off : 4, /* data offset */ + x2 : 4; /* (unused) */ +#endif + uint8_t flags; +#define TCP_FIN 0x01 +#define TCP_SYN 0x02 +#define TCP_RST 0x04 +#define TCP_PUSH 0x08 +#define TCP_ACK 0x10 +#define TCP_URG 0x20 +#define TCP_ECE 0x40 +#define TCP_CWR 0x80 +#define TCP_FLAGS (TCP_FIN | TCP_SYN | TCP_RST | TCP_PUSH | TCP_ACK | TCP_URG | TCP_ECE | TCP_CWR) +#define PRINT_TCP_FLAGS "\20\1FIN\2SYN\3RST\4PUSH\5ACK\6URG\7ECE\10CWR" + + uint16_t win; /* window */ + uint16_t sum; /* checksum */ + uint16_t urp; /* urgent pointer */ +}; diff --git a/include/net/udp.h b/include/net/udp.h new file mode 100644 index 0000000..fae7c5d --- /dev/null +++ b/include/net/udp.h @@ -0,0 +1,14 @@ +/* + * udp.h - User Datagram Protocol + */ + +#pragma once + +#include + +struct udp_hdr { + uint16_t src_port; + uint16_t dst_port; + uint16_t len; + uint16_t cksum; +}; diff --git a/include/skyloft/global.h b/include/skyloft/global.h new file mode 100644 index 0000000..bf988f8 --- /dev/null +++ b/include/skyloft/global.h @@ -0,0 +1,33 @@ +#ifndef _GLOBAL_H_ +#define _GLOBAL_H_ + +#include +#include + +#include + +#include +#include + +#define DAEMON_APP_ID 0 + +#define SHM_META_PATH "/dev/shm/skyloft_meta" +#define SHM_APPS_PATH "/dev/shm/skyloft_apps" +#define SHM_INGRESS_PATH "/mnt/huge/skyloft_ingress" +#define SHM_INGRESS_KEY 0x696d736b /* "imsk" */ +#define SHM_INGRESS_SIZE 0x20000000 + +struct metadata { + spinlock_t lock; + volatile int nr_apps; + volatile uint64_t boot_time_us; + /* maps cpu to app */ + volatile atomic_int apps[USED_CPUS]; +}; + +extern struct metadata 
*shm_metadata; +extern struct proc *shm_apps; + +int global_init(void); + +#endif // _GLOBAL_H_ diff --git a/include/skyloft/io.h b/include/skyloft/io.h new file mode 100644 index 0000000..67f5a9c --- /dev/null +++ b/include/skyloft/io.h @@ -0,0 +1,137 @@ +/* + * io.h: I/O thread definitions + */ + +#pragma once + +#include +#include +#include +#include +#include + +/* preamble to ingress network packets */ +struct rx_net_hdr { + uint64_t completion_data; /* a tag to help complete the request */ + uint32_t len; /* the length of the payload */ + uint32_t rss_hash; /* the HW RSS 5-tuple hash */ + uint32_t csum_type; /* the type of checksum */ + uint32_t csum; /* 16-bit one's complement */ + char payload[]; /* packet data */ +}; + +/* preamble to egress network packets */ +struct tx_net_hdr { + uint64_t completion_data; /* a tag to help complete the request */ + uint32_t len; /* the length of the payload */ + uint32_t olflags; /* offload flags */ + uint16_t pad; /* because of 14 byte ethernet header */ + uint8_t payload[]; /* packet data */ +} __attribute__((__packed__)); + +/* possible values for @csum_type above */ +enum { + /* + * Hardware did not provide checksum information. + */ + CHECKSUM_TYPE_NEEDED = 0, + + /* + * The checksum was verified by hardware and found to be valid. + */ + CHECKSUM_TYPE_UNNECESSARY, + + /* + * Hardware provided a 16 bit one's complement sum from after the LL + * header to the end of the packet. VLAN tags (if present) are included + * in the sum. This is the most robust checksum type because it's useful + * even if the NIC can't parse the headers. + */ + CHECKSUM_TYPE_COMPLETE, +}; + +/* possible values for @olflags above */ +#define OLFLAG_IP_CHKSUM BIT(0) /* enable IP checksum generation */ +#define OLFLAG_TCP_CHKSUM BIT(1) /* enable TCP checksum generation */ +#define OLFLAG_IPV4 BIT(2) /* indicates the packet is IPv4 */ +#define OLFLAG_IPV6 BIT(3) /* indicates the packet is IPv6 */ + +/* + * RX queues: IOKERNEL -> RUNTIMES + * These queues multiplex several different types of requests. + */ +enum { + RX_NET_RECV = 0, /* points to a struct rx_net_hdr */ + RX_NET_COMPLETE, /* contains tx_net_hdr.completion_data */ + RX_CALL_NR, /* number of commands */ +}; + +/* + * TX packet queues: RUNTIMES -> IOKERNEL + * These queues are only for network packets and can experience HOL blocking. + */ +enum { + TXPKT_NET_XMIT = 0, /* points to a struct tx_net_hdr */ + TXPKT_NR, /* number of commands */ +}; + +/* + * TX command queues: RUNTIMES -> IOKERNEL + * These queues handle a variety of commands, and typically they are handled + * much faster by the IOKERNEL than packets, so no HOL blocking. 
+ */ +enum { + TXCMD_NET_COMPLETE = 0, /* contains rx_net_hdr.completion_data */ + TXCMD_NR, /* number of commands */ +}; + +/* + * Helpers + */ + +int str_to_ip(const char *str, uint32_t *addr); +int str_to_mac(const char *str, struct eth_addr *addr); + +/* + * I/O thread + */ + +struct iothread_t { + /* dpdk port id */ + int port_id; + /* dpdk rx memory pool */ + struct rte_mempool *rx_mbuf_pool; + /* dpdk tx memory pool */ + struct rte_mempool *tx_mbuf_pool; + /* communication shared memory */ + struct shm_region tx_region; + /* network configurations */ + uint32_t addr; + uint32_t netmask; + uint32_t gateway; + struct eth_addr mac; +}; + +extern struct iothread_t *io; +extern physaddr_t* page_paddrs; + +int iothread_init(void); +__noreturn void iothread_main(void); + +int dpdk_init(void); +void dpdk_print_eth_stats(void); +int dpdk_late_init(void); + +int rx_init(void); +bool rx_burst(void); +bool rx_send_to_runtime(struct proc *p, uint32_t hash, uint64_t cmd, uint64_t payload); + +int tx_init(void); +bool tx_burst(void); +int tx_drain_completions(struct proc *p, int n); +bool tx_send_completion(void *obj); + +bool cmd_rx_burst(void); + +/* the egress buffer pool must be large enough to fill all the TXQs entirely */ +#define EGRESS_POOL_SIZE(nks) (IO_PKTQ_SIZE * MBUF_DEFAULT_LEN * MAX(16, (nks)) * 16UL) \ No newline at end of file diff --git a/include/skyloft/mm.h b/include/skyloft/mm.h new file mode 100644 index 0000000..5e20a44 --- /dev/null +++ b/include/skyloft/mm.h @@ -0,0 +1,11 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +int mm_init_percpu(); +int mm_init(); diff --git a/include/skyloft/mm/mempool.h b/include/skyloft/mm/mempool.h new file mode 100644 index 0000000..c275cdd --- /dev/null +++ b/include/skyloft/mm/mempool.h @@ -0,0 +1,59 @@ +/* + * mempool.h - a simple, preallocated pool of memory + */ + +#pragma once + +#include +#include + +struct mempool { + void **free_items; + size_t allocated; + size_t capacity; + void *buf; + size_t len; + size_t pg_sz; + size_t item_len; +}; + +#ifdef DEBUG +extern void __mempool_alloc_debug_check(struct mempool *m, void *item); +extern void __mempool_free_debug_check(struct mempool *m, void *item); +#else /* DEBUG */ +static inline void __mempool_alloc_debug_check(struct mempool *m, void *item) {} +static inline void __mempool_free_debug_check(struct mempool *m, void *item) {} +#endif /* DEBUG */ + +/** + * mempool_alloc - allocates an item from the pool + * @m: the memory pool to allocate from + * + * Returns an item, or NULL if the pool is empty. 
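+ *
+ * Illustrative usage (sketch only; buf/buf_len are a caller-provided memory
+ * region, the 256-byte item size is a placeholder, and mempool_create()
+ * returning 0 on success is assumed):
+ *
+ *   struct mempool m;
+ *   if (mempool_create(&m, buf, buf_len, PGSIZE_2MB, 256) == 0) {
+ *       void *item = mempool_alloc(&m);
+ *       if (item)
+ *           mempool_free(&m, item);
+ *       mempool_destroy(&m);
+ *   }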
+ */ +static inline void *mempool_alloc(struct mempool *m) +{ + void *item; + if (unlikely(m->allocated >= m->capacity)) + return NULL; + item = m->free_items[m->allocated++]; + __mempool_alloc_debug_check(m, item); + return item; +} + +/** + * mempool_free - returns an item to the pool + * @m: the memory pool the item was allocated from + * @item: the item to return + */ +static inline void mempool_free(struct mempool *m, void *item) +{ + __mempool_free_debug_check(m, item); + m->free_items[--m->allocated] = item; + assert(m->allocated <= m->capacity); /* could have overflowed */ +} + +int mempool_create(struct mempool *m, void *buf, size_t len, size_t pgsize, size_t item_len); +void mempool_destroy(struct mempool *m); + +struct tcache *mempool_create_tcache(struct mempool *m, const char *name, unsigned int mag_size); diff --git a/include/skyloft/mm/page.h b/include/skyloft/mm/page.h new file mode 100644 index 0000000..91c286c --- /dev/null +++ b/include/skyloft/mm/page.h @@ -0,0 +1,227 @@ +/* + * page.h - page-level memory management + */ + +#pragma once + +#include +#include +#include +#include +#include + +struct slab_node; + +struct page { + int flags; + struct kref ref; + void *next; + struct list_node link; + struct slab_node *snode; + off_t offset; + physaddr_t paddr; + long item_count; +}; + +#define PAGE_FLAG_LARGE 0x01 /* page is large */ +#define PAGE_FLAG_IN_USE 0x02 /* page is allocated */ +#define PAGE_FLAG_SLAB 0x04 /* page is used by SLAB */ +#define PAGE_FLAG_SHATTERED 0x08 /* page is 2MB shattered into 4KB */ +#define PAGE_FLAG_PGDIR 0x10 /* page is being used as a PDE */ + +/* meta-data length for small pages */ +#define SMPAGE_META_LEN (PGSIZE_2MB / PGSIZE_4KB * sizeof(struct page)) + +/* meta-data large page count for large pages (per NUMA node) */ +#define LGPAGE_META_NR_LGPAGES 1 + +/* meta-data length for large pages (per NUMA node) */ +#define LGPAGE_META_LEN (LGPAGE_META_NR_LGPAGES * PGSIZE_2MB) + +/* the number of large page meta-data entries (per NUMA node) */ +#define LGPAGE_META_ENTS (LGPAGE_META_LEN / sizeof(struct page)) + +/* the size of the page map address space (per NUMA node) */ +#define LGPAGE_NODE_ADDR_LEN (LGPAGE_META_ENTS * PGSIZE_2MB) + +/* per NUMA node page tables are stored in a contiguous array */ +extern struct page *page_tbl; + +#define PAGE_BASE_ADDR 0x100000000000UL /* the start of page mappings */ +#define PAGE_END_ADDR (PAGE_BASE_ADDR + LGPAGE_META_ENTS * PGSIZE_2MB * MAX_NUMA) + +/** + * is_page_addr - determines if an address is inside page memory + * @addr: the address of the page + * + * Returns true if the address is inside page memory. 
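+ *
+ * Example (sketch): an address returned by page_alloc_addr(), declared later
+ * in this header, lies inside the page mapping region and passes this check:
+ *   void *p = page_alloc_addr(PGSIZE_4KB);
+ *   assert(!p || is_page_addr(p));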
+ */ +static inline bool is_page_addr(void *addr) +{ + return ((uintptr_t)addr >= PAGE_BASE_ADDR && (uintptr_t)addr < PAGE_END_ADDR); +} + +/** + * addr_to_numa_node - gets the NUMA node of a page's address + * @addr: the page's address + * + * Returns a numa node + */ +static inline int addr_to_numa_node(void *addr) +{ + return ((uintptr_t)addr - PAGE_BASE_ADDR) / LGPAGE_NODE_ADDR_LEN; +} + +#define LGPGN(addr) PGN_2MB((uintptr_t)(addr) - (uintptr_t)PAGE_BASE_ADDR) +#define PGN(addr) PGN_4KB((uintptr_t)(addr)-PGADDR_2MB(addr)) + +/** + * pa_to_lgpage - gets the large page struct for an address + * @addr: the address + * + * Returns a pointer to the large page struct + */ +static inline struct page *addr_to_lgpage(void *addr) +{ + return page_tbl + LGPGN(addr); +} + +/** + * lgpage_to_addr - gets the address for a large page struct + * @pg: the large page struct + * + * Returns the address + */ +static inline void *lgpage_to_addr(struct page *pg) +{ + return (void *)(PAGE_BASE_ADDR + (pg - page_tbl) * PGSIZE_2MB); +} + +/** + * addr_to_smpage - gets the small page struct for an address + * @addr: the address + * + * Returns a pointer to the page struct + */ +static inline struct page *addr_to_smpage(void *addr) +{ + struct page *frags = (struct page *)PGADDR_2MB(addr); + return &frags[PGN(addr)]; +} + +/** + * smpage_to_addr - gets the address for a small page struct + * @pg: the small page struct + * + * Returns an address + */ +static inline void *smpage_to_addr(struct page *pg) +{ + struct page *frags = (struct page *)PGADDR_2MB(pg); + return (void *)(PGADDR_2MB(pg) + (pg - frags) * PGSIZE_4KB); +} + +/** + * addr_to_page - gets the page struct for an address + * @addr: the address + * + * Returns a pointer to the page struct + */ +static inline struct page *addr_to_page(void *addr) +{ + struct page *pg = addr_to_lgpage(addr); + assert(pg->flags & PAGE_FLAG_LARGE); + + if (pg->flags & PAGE_FLAG_SHATTERED) + return addr_to_smpage(addr); + return pg; +} + +/** + * page_to_addr - gets the address for a page struct + * @pg: the page struct + * + * Returns the address + */ +static inline void *page_to_addr(struct page *pg) +{ + if (pg->flags & PAGE_FLAG_LARGE) + return lgpage_to_addr(pg); + return smpage_to_addr(pg); +} + +/** + * addr_to_pa - gets the physical address of an address in page memory + * @addr: the address of (or in) the page + * + * Returns the physical address, including the offset. + */ +static inline physaddr_t addr_to_pa(void *addr) +{ + struct page *pg = addr_to_lgpage(addr); + return pg->paddr + PGOFF_2MB(addr); +} + +/** + * smpage_to_lgpage - retrieves the large page struct for a 4kb page + * @pg: the page + * + * Returns a pointer to the lgpage struct. + */ +static inline struct page *smpage_to_lgpage(struct page *pg) +{ + assert(!(pg->flags & PAGE_FLAG_LARGE)); + return addr_to_lgpage((void *)pg); +} + +/** + * page_to_size - gets the size of the page (in bytes) + * @pg: the page + * + * Returns the size in bytes. + */ +static inline size_t page_to_size(struct page *pg) +{ + return (pg->flags & PAGE_FLAG_LARGE) ? 
PGSIZE_2MB : PGSIZE_4KB; +} + +/* + * Page allocation + */ + +/* function attributes to optimize for malloc() behavior */ +#define __page_malloc __malloc __assume_aligned(PGSIZE_4KB) + +extern struct page *page_alloc_on_node(size_t pgsize, int numa_node); +extern struct page *page_alloc(size_t pgsize); +extern struct page *page_zalloc(size_t pgsize); +extern void *page_alloc_addr_on_node(size_t pgsize, int numa_node) __page_malloc; +extern void *page_alloc_addr(size_t pgsize) __page_malloc; +extern void *page_zalloc_addr_on_node(size_t pgsize, int numa_node) __page_malloc; +extern void *page_zalloc_addr(size_t pgsize) __page_malloc; +extern void page_put_addr(void *addr); +extern void page_release(struct kref *ref); + +/** + * page_get - increments the page reference count + * @pg: the page to reference + * + * Returns the page. + */ +static inline struct page *page_get(struct page *pg) +{ + kref_get(&pg->ref); + return pg; +} + +/** + * page_put - decrements the page reference count, freeing it at zero + * @pg: the page to unreference + */ +static inline void page_put(struct page *pg) +{ + kref_put(&pg->ref, page_release); +} + +extern int page_init(void); +extern int page_init_percpu(void); diff --git a/include/skyloft/mm/slab.h b/include/skyloft/mm/slab.h new file mode 100644 index 0000000..d15cdc5 --- /dev/null +++ b/include/skyloft/mm/slab.h @@ -0,0 +1,80 @@ +/* + * slab.h - a SLAB allocator + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +/* forward declarations */ +struct slab_hdr; +struct slab_node; +struct tcache; + +/* + * slab support + */ + +#define SLAB_CHUNK_SIZE 8 +#define SLAB_MIN_SIZE 16 + +/* function attributes for methods that allocate slab items */ +#define __slab_malloc __malloc __assume_aligned(SLAB_MIN_SIZE) + +/* Slab nodes are per-numa node slab internal state. */ +struct slab_node { + size_t size; + int numa_node; + int offset; + int flags; + int nr_elems; + spinlock_t page_lock; + + /* slab pages */ + off_t pg_off; + struct page *cur_pg; + struct list_head full_list; + struct list_head partial_list; + int nr_pages; +}; + +struct slab { + const char *name; + size_t size; + struct list_node link; + struct slab_node *nodes[MAX_NUMA]; +} __aligned(CACHE_LINE_SIZE); + +/* force the slab to be backed with large pages */ +#define SLAB_FLAG_LGPAGE BIT(0) +/* false sharing is okay (less internal fragmentation) */ +#define SLAB_FLAG_FALSE_OKAY BIT(1) +/* managing 4kb pages (internal use only) */ +#define SLAB_FLAG_PAGES BIT(2) + +extern int slab_create(struct slab *s, const char *name, size_t size, int flags); +extern void slab_destroy(struct slab *s); +extern int slab_reclaim(struct slab *s); +extern void *slab_alloc_on_node(struct slab *s, int numa_node) __slab_malloc; +extern void slab_free(struct slab *s, void *item); +extern void slab_print_usage(void); + +/** + * slab_alloc - allocates an item on the local NUMA node + * @s: the slab to allocate from + * + * Returns an item or NULL if out of memory. 
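+ *
+ * Illustrative usage (sketch; the slab name and struct my_obj are placeholder
+ * names, and slab_create() returning 0 on success is assumed):
+ *
+ *   static struct slab my_slab;
+ *   if (slab_create(&my_slab, "my_objs", sizeof(struct my_obj), 0) == 0) {
+ *       struct my_obj *o = slab_alloc(&my_slab);
+ *       if (o)
+ *           slab_free(&my_slab, o);
+ *   }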
+ */ +static __always_inline void *slab_alloc(struct slab *s) +{ + return slab_alloc_on_node(s, current_numa_node()); +} + +struct tcache *slab_create_tcache(struct slab *s, unsigned int mag_size); + +extern int slab_init(void); diff --git a/include/skyloft/mm/smalloc.h b/include/skyloft/mm/smalloc.h new file mode 100644 index 0000000..6e5bebe --- /dev/null +++ b/include/skyloft/mm/smalloc.h @@ -0,0 +1,47 @@ +/* + * smalloc.h - malloc() based on the SLAB and thread-local item caches + */ + +#pragma once + +#include + +#include + +#define __smalloc_attr __malloc __assume_aligned(16) + +void *smalloc(size_t size) __smalloc_attr; +void *__szalloc(size_t size) __smalloc_attr; +void sfree(void *item); +int smalloc_init(void); +int smalloc_init_percpu(void); + +/** + * szalloc - allocates zeroed memory + * @size: the size of the item + * + * Returns an item or NULL if out of memory. + */ +static __always_inline void *szalloc(size_t size) +{ + if (__builtin_constant_p(size)) { + void *item = smalloc(size); + if (unlikely(!item)) + return NULL; + memset(item, 0, size); + return item; + } + return __szalloc(size); +} + +/** + * smalloc_array - allocates a contiguous array of items + * @n: the number of items + * @size: the size of each item + * + * Returns an item array, or NULL if out of memory. + */ +static __always_inline void *smalloc_array(size_t n, size_t size) +{ + return smalloc(n * size); +} diff --git a/include/skyloft/mm/stack.h b/include/skyloft/mm/stack.h new file mode 100644 index 0000000..e247e0d --- /dev/null +++ b/include/skyloft/mm/stack.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include + +DECLARE_PERCPU(struct tcache_percpu, stack_percpu); + +/** + * stack_alloc - allocates a stack + * + * Stack allocation is extremely cheap, think less than taking a lock. + * + * Returns an uninitialized stack. 
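+ *
+ * Example (sketch):
+ *   struct stack *s = stack_alloc();
+ *   if (s)
+ *       stack_free(s);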
+ */ +static inline struct stack *stack_alloc(void) { return tcache_alloc(&percpu_get(stack_percpu)); } + +/** + * stack_free - frees a stack + * @s: the stack to free + */ +static inline void stack_free(struct stack *s) +{ + tcache_free(&percpu_get(stack_percpu), (void *)s); +} + +int stack_init_percpu(); +int stack_init(); diff --git a/include/skyloft/mm/tcache.h b/include/skyloft/mm/tcache.h new file mode 100644 index 0000000..209a30e --- /dev/null +++ b/include/skyloft/mm/tcache.h @@ -0,0 +1,103 @@ +/* + * tcache.h - a generic per-CPU item cache based on magazines + */ + +#pragma once + +#include + +#include +#include +#include +#include +#include + +#define TCACHE_MAX_MAG_SIZE 64 +#define TCACHE_DEFAULT_MAG_SIZE 8 + +struct tcache; + +struct tcache_hdr { + struct tcache_hdr *next_item; + struct tcache_hdr *next_mag; +}; + +#define TCACHE_MIN_ITEM_SIZE sizeof(struct tcache_hdr) + +struct tcache_ops { + int (*alloc)(struct tcache *tc, int nr, void **items); + void (*free)(struct tcache *tc, int nr, void **items); +}; + +struct tcache_percpu { + struct tcache *tc; + unsigned int rounds; + unsigned int capacity; + struct tcache_hdr *loaded; + struct tcache_hdr *previous; +}; + +struct tcache { + const char *name; + const struct tcache_ops *ops; + size_t item_size; + atomic_long mags_allocated; + struct list_node link; + + unsigned int mag_size; + spinlock_t lock; + struct tcache_hdr *shared_mags; + unsigned long data; +}; + +extern void *__tcache_alloc(struct tcache_percpu *ltc); +extern void __tcache_free(struct tcache_percpu *ltc, void *item); + +/* + * stat counters + */ +DECLARE_PERCPU(uint64_t, mag_alloc); +DECLARE_PERCPU(uint64_t, mag_free); +DECLARE_PERCPU(uint64_t, pool_alloc); +DECLARE_PERCPU(uint64_t, pool_free); + +/** + * tcache_alloc - allocates an item from the thread cache + * @ltc: the thread-local cache + * + * Returns an item, or NULL if out of memory. 
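+ *
+ * Illustrative usage (sketch; "bufs", my_tcache_ops, and the 128-byte item
+ * size are placeholders, and ltc would normally live in per-CPU storage):
+ *
+ *   struct tcache *tc = tcache_create("bufs", &my_tcache_ops,
+ *                                     TCACHE_DEFAULT_MAG_SIZE, 128);
+ *   struct tcache_percpu ltc;
+ *   tcache_init_percpu(tc, &ltc);
+ *   void *item = tcache_alloc(&ltc);
+ *   if (item)
+ *       tcache_free(&ltc, item);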
+ */ +static inline void *tcache_alloc(struct tcache_percpu *ltc) +{ + void *item = (void *)ltc->loaded; + + if (ltc->rounds == 0) + return __tcache_alloc(ltc); + + ltc->rounds--; + ltc->loaded = ltc->loaded->next_item; + return item; +} + +/** + * tcache_free - frees an item to the thread cache + * @ltc: the thread-local cache + * @item: the item to free + */ +static inline void tcache_free(struct tcache_percpu *ltc, void *item) +{ + struct tcache_hdr *hdr = (struct tcache_hdr *)item; + + if (ltc->rounds >= ltc->capacity) + return __tcache_free(ltc, item); + + ltc->rounds++; + hdr->next_item = ltc->loaded; + ltc->loaded = hdr; +} + +extern struct tcache *tcache_create(const char *name, const struct tcache_ops *ops, + unsigned int mag_size, size_t item_size); +extern void tcache_init_percpu(struct tcache *tc, struct tcache_percpu *ltc); +extern void tcache_reclaim(struct tcache *tc); +extern void tcache_print_usage(void); diff --git a/include/skyloft/net.h b/include/skyloft/net.h new file mode 100644 index 0000000..d949eaa --- /dev/null +++ b/include/skyloft/net.h @@ -0,0 +1,189 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct rx_net_hdr; + +struct netaddr { + uint32_t ip; + uint16_t port; +}; + +#define MAX_ARP_STATIC_ENTRIES 1024 +struct arp_static_entry { + uint32_t ip; + struct eth_addr addr; +}; +extern int arp_static_count; +extern struct arp_static_entry static_entries[MAX_ARP_STATIC_ENTRIES]; + +void net_rx_softirq(struct rx_net_hdr **hdrs, int nr); + +/* + * Network Error Reporting Functions + */ + +void trans_error(struct mbuf *m, int err); +void net_error(struct mbuf *m, int err); + +/* + * Network Dump Functions + */ + +void dump_eth_pkt(int loglvl, struct eth_hdr *hdr); +void dump_arp_pkt(int loglvl, struct arp_hdr *arphdr, struct arp_hdr_ethip *ethip); +void dump_udp_pkt(int loglvl, uint32_t saddr, struct udp_hdr *udp_hdr, void *data); +char *ip_addr_to_str(uint32_t addr, char *str); + +/* + * RX Networking Functions + */ + +void net_rx_arp(struct mbuf *m); +void net_rx_icmp(struct mbuf *m, const struct ip_hdr *iphdr, uint16_t len); +void net_rx_trans(struct mbuf **ms, int nr); +void tcp_rx_closed(struct mbuf *m); + +/* + * TX Networking Functions + */ + +void arp_send(uint16_t op, struct eth_addr dhost, uint32_t daddr); +int arp_lookup(uint32_t daddr, struct eth_addr *dhost_out, struct mbuf *m) __must_use_return; +struct mbuf *net_tx_alloc_mbuf(void); +void net_tx_release_mbuf(struct mbuf *m); +int net_tx_eth(struct mbuf *m, uint16_t proto, struct eth_addr dhost) __must_use_return; +int net_tx_ip(struct mbuf *m, uint8_t proto, uint32_t daddr) __must_use_return; +int net_tx_ip_burst(struct mbuf **ms, int n, uint8_t proto, uint32_t daddr) __must_use_return; +int net_tx_icmp(struct mbuf *m, uint8_t type, uint8_t code, uint32_t daddr, uint16_t id, + uint16_t seq) __must_use_return; + +/** + * net_tx_eth - transmits an ethernet packet, or frees it on failure + * @m: the mbuf to transmit + * @type: the ethernet type (in native byte order) + * @dhost: the destination MAC address + * + * The payload must start with the network (L3) header. The ethernet (L2) + * header will be prepended by this function. + * + * @m must have been allocated with net_tx_alloc_mbuf(). 
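+ *
+ * Illustrative call sequence (sketch; ETHTYPE_ARP is assumed to be defined in
+ * net/ethernet.h, and dhost to come from a prior ARP lookup):
+ *
+ *   struct mbuf *m = net_tx_alloc_mbuf();
+ *   if (m) {
+ *       ... append the L3 payload to m ...
+ *       net_tx_eth_or_free(m, ETHTYPE_ARP, dhost);
+ *   }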
+ */ +static inline void net_tx_eth_or_free(struct mbuf *m, uint16_t type, struct eth_addr dhost) +{ + if (unlikely(net_tx_eth(m, type, dhost) != 0)) + mbuf_free(m); +} + +/** + * net_tx_ip - transmits an IP packet, or frees it on failure + * @m: the mbuf to transmit + * @proto: the transport protocol + * @daddr: the destination IP address (in native byte order) + * + * The payload must start with the transport (L4) header. The IPv4 (L3) and + * ethernet (L2) headers will be prepended by this function. + * + * @m must have been allocated with net_tx_alloc_mbuf(). + */ +static inline void net_tx_ip_or_free(struct mbuf *m, uint8_t proto, uint32_t daddr) +{ + if (unlikely(net_tx_ip(m, proto, daddr) != 0)) + mbuf_free(m); +} + +/* + * Transport protocol layer + */ + +enum { + /* match on protocol, source IP and port */ + TRANS_MATCH_3TUPLE = 0, + /* match on protocol, source IP and port + dest IP and port */ + TRANS_MATCH_5TUPLE, +}; + +struct trans_entry; + +struct trans_ops { + /* receive an ingress packet */ + void (*recv)(struct trans_entry *e, struct mbuf *m); + /* propagate a network error */ + void (*err)(struct trans_entry *e, int err); +}; + +struct trans_entry { + int match; + uint8_t proto; + struct netaddr laddr; + struct netaddr raddr; + struct rcu_hlist_node link; + struct rcu_head rcu; + const struct trans_ops *ops; +}; + +/** + * trans_init_3tuple - initializes a transport layer entry (3-tuple match) + * @e: the entry to initialize + * @proto: the IP protocol + * @ops: operations to handle matching flows + * @laddr: the local address + */ +static inline void trans_init_3tuple(struct trans_entry *e, uint8_t proto, + const struct trans_ops *ops, struct netaddr laddr) +{ + e->match = TRANS_MATCH_3TUPLE; + e->proto = proto; + e->laddr = laddr; + e->ops = ops; +} + +/** + * trans_init_5tuple - initializes a transport layer entry (5-tuple match) + * @e: the entry to initialize + * @proto: the IP protocol + * @ops: operations to handle matching flows + * @laddr: the local address + * @raddr: the remote address + */ +static inline void trans_init_5tuple(struct trans_entry *e, uint8_t proto, + const struct trans_ops *ops, struct netaddr laddr, + struct netaddr raddr) +{ + e->match = TRANS_MATCH_5TUPLE; + e->proto = proto; + e->laddr = laddr; + e->raddr = raddr; + e->ops = ops; +} + +int trans_table_add(struct trans_entry *e); +int trans_table_add_with_ephemeral_port(struct trans_entry *e); +void trans_table_remove(struct trans_entry *e); + +int net_init(void); +int arp_init(void); +int arp_init_late(void); +int trans_init(void); +int net_init_percpu(void); +int arp_init_percpu(void); + +/* + * Ping support + */ + +struct ping_payload { + struct timeval tx_time; +}; + +int net_ping_init(); +void net_send_ping(uint16_t seq_num, uint32_t daddr); +void net_recv_ping(const struct ping_payload *payload, const struct icmp_pkt *icmp_pkt); diff --git a/include/skyloft/percpu.h b/include/skyloft/percpu.h new file mode 100644 index 0000000..d124e4c --- /dev/null +++ b/include/skyloft/percpu.h @@ -0,0 +1,78 @@ +/* + * percpu.h - percpu data and other utilities + */ + +#pragma once + +#include +#include + +#include +#include + +/* used to define percpu variables */ +#define DEFINE_PERCPU(type, name) \ + typeof(type) __percpu_##name __percpu __attribute__((section(".percpu,\"\",@nobits#"))) + +/* used to make percpu variables externally available */ +#define DECLARE_PERCPU(type, name) extern DEFINE_PERCPU(type, name) + +extern __thread void *percpu_ptr; +extern unsigned int thread_count; +extern 
const char __percpu_start[]; + +declear_cpu_array(void *, percpu_offsets, USED_CPUS); + +/** + * percpu_get_remote - get a percpu variable on a specific CPU + * @var: the percpu variable + * @cpu_id: the CPU id + * + * Returns a percpu variable. + */ +#define percpu_get_remote(var, cpu_id) \ + (*((__force typeof(__percpu_##var) *)((uintptr_t) & __percpu_##var + \ + (uintptr_t)percpu_offsets[cpu_id] - \ + (uintptr_t)__percpu_start))) + +static inline void *__percpu_get(void __percpu *key) +{ + return (__force void *)((uintptr_t)key + (uintptr_t)percpu_ptr - (uintptr_t)__percpu_start); +} + +/** + * percpu_get - get the local percpu variable + * @var: the percpu variable + * + * Returns a percpu variable. + */ +#define percpu_get(var) (*((typeof(__percpu_##var) *)(__percpu_get(&__percpu_##var)))) + +/** + * thread_is_active - is the thread initialized? + * @thread: the thread id + * + * Returns true if yes, false if no. + */ +#define thread_is_active(thread) (percpu_offsets[thread] != NULL) + +static inline int __thread_next_active(int thread) +{ + while (thread < (int)thread_count) { + if (thread_is_active(++thread)) + return thread; + } + + return thread; +} + +/** + * for_each_thread - iterates over each thread + * @thread: the thread id + */ +#define for_each_thread(thread) \ + for ((thread) = -1; (thread) = __thread_next_active(thread), (thread) < thread_count;) + +extern __thread bool thread_init_done; + +extern int percpu_init(void); diff --git a/include/skyloft/platform.h b/include/skyloft/platform.h new file mode 100644 index 0000000..3f0f7aa --- /dev/null +++ b/include/skyloft/platform.h @@ -0,0 +1,122 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +int platform_init(); +int platform_init_percpu(); +int cpubind_init_percpu(); + +/* CPU */ + +static inline int hw_cpu_id(int cpu_id) +{ + declear_cpu_array(int, hw_cpu_list, USED_CPUS); + if (cpu_id >= USED_CPUS) { + return -1; + } + return hw_cpu_list[cpu_id]; +} + +static inline int cpu_sibling(int cpu_id) +{ + declear_cpu_array(int, cpu_siblings, USED_CPUS); + return cpu_siblings[cpu_id]; +} + +static inline int cpu_numa_node(int cpu_id) { return numa_node_of_cpu(hw_cpu_id(cpu_id)); } + +static inline pid_t _gettid() { return syscall(SYS_gettid); } + +int bind_to_cpu(int tid, int cpu_id); +int unbind_cpus(int tid); + +/* Memory */ + +typedef unsigned int mem_key_t; + +void touch_mapping(void *base, size_t len, size_t pgsize); +void *mem_map_anom(void *base, size_t len, size_t pgsize, int node); +void *mem_map_shm_file(const char *path, void *base, size_t len, size_t pgsize, int node); +void *mem_map_shm(mem_key_t key, void *base, size_t len, size_t pgsize, bool exclusive); +int mem_unmap_shm(void *base); + +typedef unsigned long physaddr_t; /* physical addresses */ +typedef unsigned long virtaddr_t; /* virtual addresses */ + +int mem_lookup_page_phys_addrs(void *addr, size_t len, size_t pgsize, physaddr_t *maddrs); + +static inline int mem_lookup_page_phys_addr(void *addr, size_t pgsize, physaddr_t *paddr) +{ + return mem_lookup_page_phys_addrs(addr, pgsize, pgsize, paddr); +} + +physaddr_t mem_virt2phys(void *addr); + +/* Remote FD */ + +static inline int pidfd_open(pid_t pid) { return syscall(SYS_pidfd_open, pid, 0); } + +static inline int pidfd_getfd(int pidfd, int targetfd) +{ + return syscall(SYS_pidfd_getfd, pidfd, targetfd, 0); +} + +/* Kernel module API */ + +int skyloft_park_on_cpu(int cpu); +int skyloft_wakeup(pid_t target_tid); +int skyloft_switch_to(pid_t 
target_tid); + +/* User interrupt */ +#ifdef SKYLOFT_UINTR + +#include + +#define UVEC 1 + +static __always_inline __attribute__((target("general-regs-only"))) int uintr_index() +{ + extern __thread int g_uintr_index; + return g_uintr_index; +} + +static inline uint64_t rdfsbase() +{ + uint64_t val; + asm volatile("rdfsbase %0" : "=r"(val)); + return val; +} + +#define local_irq_disable _clui +#define local_irq_enable _stui +#define local_irq_enabled _testui + +#define local_irq_save(flags) \ + do { \ + flags = _testui(); \ + _clui(); \ + } while (0) +#define local_irq_restore(flags) \ + do { \ + if (flags) \ + _stui(); \ + } while (0) + +int skyloft_setup_device_uintr(int flags); +int skyloft_timer_set_hz(int hz); +#else +#define local_irq_disable() +#define local_irq_enable() +#define local_irq_enabled() false +#define local_irq_save(flags) ((void)flags) +#define local_irq_restore(flags) ((void)flags) +#endif diff --git a/include/skyloft/sched.h b/include/skyloft/sched.h new file mode 100644 index 0000000..d091cab --- /dev/null +++ b/include/skyloft/sched.h @@ -0,0 +1,192 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +struct kthread; +struct task; +struct proc; + +__noreturn void start_schedule(void); +__noreturn __noinline void schedule(); + +/* init handlers */ +int sched_init_percpu(void); +int sched_init(void); +int proc_init(void); + +/* softirq APIs */ +struct task *softirq_task(struct kthread *k, int budget); +bool softirq_run(int budget); + +/* task APIs */ +int task_spawn(int cpu_id, void (*fn)(void *arg), void *arg, int stack_size); +int task_enqueue(int cpu_id, struct task *task); +void task_yield(); +void task_wakeup(struct task *); +void task_block(spinlock_t *lock); +__noreturn void task_exit(void *code); + +/* assembly helper routines from switch.S */ +extern void __context_switch(uint64_t *prev_stack, uint64_t next_stack, uint8_t *prev_stack_busy); +extern void __context_switch_to_idle(uint64_t *prev_stack, uint64_t idle_stack); +extern void __context_switch_from_idle(uint64_t next_stack); +extern void __context_switch_to_fn_nosave(void (*fn)(void), uint64_t idle_stack); + +struct kthread { + /* 1st cacheline */ + spinlock_t lock; + /* pthread handle */ + pthread_t ph; + pid_t tid; + /* app id */ + int app; + /* logic cpu id */ + int cpu; + /* numa node id */ + int node; + /* kernel thread states */ + bool parked; + /* RCU generation number */ + uint8_t pad0[28]; + + /* 2nd cacheline */ + struct mbufq txpktq_overflow; + struct mbufq txcmdq_overflow; + uint8_t pad1[32]; + + /* 3rd cacheline */ + /* kernel thread local timer */ + spinlock_t timer_lock; + int32_t nr_timers; + struct timer_idx *timers; + uint8_t pad2[48]; + + /* 4th-6th cacheline */ + /* per-CPU communication channel */ + struct lrpc_chan rxq; + struct lrpc_chan txpktq; + struct lrpc_chan txcmdq; + + /* 7th cacheline */ + /* statistics counters */ + uint64_t stats[STAT_NR]; +} __aligned_cacheline; + +BUILD_ASSERT(offsetof(struct kthread, txpktq_overflow) == 64); +BUILD_ASSERT(offsetof(struct kthread, timer_lock) == 128); +BUILD_ASSERT(offsetof(struct kthread, rxq) == 192); +BUILD_ASSERT(offsetof(struct kthread, stats) == 192 + 64 * 6); + +struct proc { + /* global application identification */ + int id; + pid_t pid; + int nr_ks; + + /* boot time (us) */ + __nsec boot_time; + + /* process states */ + volatile bool ready; + volatile bool exited; + + /* kernel threads bound to CPU */ + struct kthread all_ks[USED_CPUS]; + struct kthread 
*active_ks[USED_CPUS]; + atomic_int nr_active; + uint32_t next_rr; + + /* overflow queue for completion data */ + uint32_t max_overflows; + uint32_t nr_overflows; + uint64_t *overflow_queue; /* a pionter to daemon space */ +} __aligned_cacheline; + +extern struct proc *proc; +extern __thread struct kthread *localk; +extern __thread volatile unsigned int preempt_cnt; + +/** + * task_self - current running task + */ +static __always_inline struct task *task_self() +{ + extern __thread struct task *__curr; + assert(__curr != NULL); + return __curr; +} + +/** + * preempt_disable - disables preemption + * + * Can be nested. + */ +static __always_inline void preempt_disable(void) { preempt_cnt++; } + +/** + * preempt_enable - reenables preemption + * + * Can be nested. + */ +static __always_inline void preempt_enable(void) { preempt_cnt--; } + +/** + * preempt_enabled - returns true if preemption is enabled + */ +static __always_inline __attribute__((target("general-regs-only"))) bool preempt_enabled(void) +{ + return !preempt_cnt; +} + +#define assert_preempt_disabled() assert(!(local_irq_enabled() && preempt_enabled())) +#define assert_local_irq_disabled() assert(!local_irq_enabled()) + +/** + * thisk - returns the per-kernel-thread data + */ +static __always_inline __attribute__((target("general-regs-only"))) struct kthread *thisk(void) +{ + return localk; +} + +/** + * cpuk - returns the per-kernel-thread data of the cpu + */ +static inline struct kthread *cpuk(int cpu_id) { return &proc->all_ks[cpu_id]; } + +/** + * getk - returns the per-kernel-thread data and disables preemption + * + * WARNING: If you're using myk() instead of getk(), that's a bug if preemption + * is enabled. The local kthread can change at anytime. + */ +static __always_inline struct kthread *getk(void) +{ + preempt_disable(); + return localk; +} + +/** + * putk - reenables preemption after calling getk() + */ +static __always_inline void putk(void) { preempt_enable(); } + +static inline int current_app_id() { return proc->id; } + +static inline int current_cpu_id() { return thisk()->cpu; } + +static inline struct proc *current_app() { return &shm_apps[current_app_id()]; } + +static inline bool is_daemon() { return current_app_id() == DAEMON_APP_ID; } + +static inline int current_numa_node() { return thisk()->node; } diff --git a/include/skyloft/sched/ops.h b/include/skyloft/sched/ops.h new file mode 100644 index 0000000..d6c0686 --- /dev/null +++ b/include/skyloft/sched/ops.h @@ -0,0 +1,69 @@ +/* + * ops.h: Skyloft general operations + */ + +#pragma once + +#if defined(SKYLOFT_SCHED_FIFO) +#include "policy/fifo.h" +#define SCHED_NAME "fifo" +#define SCHED_OP(op_name) fifo_##op_name +#elif defined(SKYLOFT_SCHED_FIFO2) +#include "policy/rr.h" +#define SCHED_NAME "fifo" +#define SCHED_OP(op_name) fifo_##op_name +#elif defined(SKYLOFT_SCHED_CFS) +#include "policy/cfs.h" +#define SCHED_NAME "cfs" +#define SCHED_OP(op_name) cfs_##op_name +#elif defined(SKYLOFT_SCHED_SQ) +#include "policy/sq.h" +#define SCHED_NAME "sq" +#define SCHED_OP(op_name) sq_##op_name +#elif defined(SKYLOFT_SCHED_SQ_LCBE) +#include "policy/sq_lcbe.h" +#define SCHED_NAME "sq" +#define SCHED_OP(op_name) sq_##op_name +#endif + +#ifndef SCHED_DATA_SIZE +#define SCHED_DATA_SIZE (2 * 1024 * 1024) +#endif + +#ifndef SCHED_PERCPU_DATA_SIZE +#define SCHED_PERCPU_DATA_SIZE (2 * 1024 * 1024) +#endif + +#define __sched_name SCHED_NAME + +static inline int __sched_init(void *data) { return SCHED_OP(sched_init)(data); } +static inline int __sched_init_percpu(void 
*percpu_data) +{ + return SCHED_OP(sched_init_percpu)(percpu_data); +} + +static inline int __sched_init_task(struct task *task) { return SCHED_OP(sched_init_task)(task); } +static inline void __sched_finish_task(struct task *task) { SCHED_OP(sched_finish_task)(task); } + +static inline int __sched_spawn(struct task *task, int cpu) +{ + return SCHED_OP(sched_spawn)(task, cpu); +} + +static inline struct task *__sched_pick_next() { return SCHED_OP(sched_pick_next)(); } +static inline void __sched_block() { SCHED_OP(sched_block)(); } +static inline void __sched_wakeup(struct task *task) { SCHED_OP(sched_wakeup)(task); } +static inline void __sched_yield() { SCHED_OP(sched_yield)(); } +static inline void __sched_percpu_lock(int cpu) { SCHED_OP(sched_percpu_lock)(cpu); } +static inline void __sched_percpu_unlock(int cpu) { SCHED_OP(sched_percpu_unlock)(cpu); } + +static inline void __sched_balance() { SCHED_OP(sched_balance)(); } +static inline void __sched_poll() { SCHED_OP(sched_poll)(); } + +static __always_inline __attribute__((target("general-regs-only"))) bool __sched_preempt() +{ + return SCHED_OP(sched_preempt)(); +} + +static inline int __sched_set_params(void *params) { return SCHED_OP(sched_set_params)(params); } +static inline void __sched_dump_tasks() { SCHED_OP(sched_dump_tasks)(); } diff --git a/include/skyloft/sched/policy/cfs.h b/include/skyloft/sched/policy/cfs.h new file mode 100644 index 0000000..33966b3 --- /dev/null +++ b/include/skyloft/sched/policy/cfs.h @@ -0,0 +1,87 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include + +#include "dummy.h" + +/* CFS load weight: weight * inv_weight = 2^32 */ +struct load_weight { + uint64_t weight; + uint32_t inv_weight; +}; + +/* CFS percpu state */ +struct cfs_rq { + spinlock_t lock; + /* sum of load of all tasks */ + struct load_weight load; + uint32_t nr_running; + /* rbtree root with leftmost node cached */ + struct rb_root_cached tasks_timeline; + uint64_t min_vruntime; + struct cfs_task *curr; +} __aligned_cacheline; + +struct cfs_task { + int last_run; + struct load_weight load; + /* rbtree link */ + struct rb_node run_node; + /* task states */ + bool on_rq; + /* task scheduled time */ + __nsec exec_start; + __nsec sum_exec_runtime; + __nsec prev_sum_exec_runtime; + uint64_t vruntime; +} __aligned_cacheline; + +#define WEIGHT_IDLEPRIO 3 +#define WMULT_IDLEPRIO 1431655765 +#define WMULT_CONST (~0U) +#define WMULT_SHIFT 32 +#define NICE_0_SHIFT 10 +#define NICE_0_LOAD (1L << NICE_0_SHIFT) + +DECLARE_PERCPU(struct cfs_rq *, percpu_rqs); + +#define this_rq() percpu_get(percpu_rqs) +#define cpu_rq(cpu) percpu_get_remote(percpu_rqs, cpu) +#define cfs_task_of(task) ((struct cfs_task *)task->policy_task_data) +#define task_of(task) (container_of((void *)task, struct task, policy_task_data)) + +#define cfs_sched_init dummy_sched_init +#define cfs_sched_set_params dummy_sched_set_params +#define cfs_sched_poll dummy_sched_poll +#define cfs_sched_dump_tasks dummy_sched_dump_tasks +#define cfs_sched_balance dummy_sched_balance + +static inline void cfs_sched_percpu_lock(int cpu) { spin_lock(&cpu_rq(cpu)->lock); } + +static inline void cfs_sched_percpu_unlock(int cpu) { spin_unlock(&cpu_rq(cpu)->lock); } + +static inline int cfs_sched_init_percpu(void *percpu_data) +{ + struct cfs_rq *cfs_rq = percpu_data; + percpu_get(percpu_rqs) = cfs_rq; + cfs_rq->curr = NULL; + cfs_rq->tasks_timeline = RB_ROOT_CACHED; + cfs_rq->min_vruntime = (uint64_t)(-(1LL << 20)); + cfs_rq->nr_running = 0; + cfs_rq->load.weight = 
cfs_rq->load.inv_weight = 0; + return 0; +} + +struct task *cfs_sched_pick_next(); +int cfs_sched_spawn(struct task *, int); +void cfs_sched_yield(); +void cfs_sched_wakeup(struct task *); +bool cfs_sched_preempt(); +int cfs_sched_init_task(struct task *); +void cfs_sched_finish_task(struct task *); diff --git a/include/skyloft/sched/policy/dummy.h b/include/skyloft/sched/policy/dummy.h new file mode 100644 index 0000000..5c8d921 --- /dev/null +++ b/include/skyloft/sched/policy/dummy.h @@ -0,0 +1,21 @@ +#pragma once + +static inline int dummy_sched_init(void *data) { return 0; } +static inline int dummy_sched_init_percpu(void *percpu_data) { return 0; } +static inline int dummy_sched_init_task(struct task *task) { return 0; } +static inline void dummy_sched_finish_task(struct task *task) {} + +static inline int dummy_sched_spawn(struct task *task, int cpu) { return 0; } +static inline struct task *dummy_sched_pick_next() { return 0; } +static inline void dummy_sched_block() {} +static inline void dummy_sched_wakeup(struct task *task) {} +static inline void dummy_sched_yield() {} +static inline void dummy_sched_percpu_lock(int cpu) {} +static inline void dummy_sched_percpu_unlock(int cpu) {} + +static inline void dummy_sched_balance() {} +static inline bool dummy_sched_preempt() { return false; } +static inline void dummy_sched_poll() {} + +static inline int dummy_sched_set_params(void *params) { return 0; } +static inline void dummy_sched_dump_tasks() {} diff --git a/include/skyloft/sched/policy/fifo.h b/include/skyloft/sched/policy/fifo.h new file mode 100644 index 0000000..00096fa --- /dev/null +++ b/include/skyloft/sched/policy/fifo.h @@ -0,0 +1,66 @@ +#pragma once + +#include +#include +#include + +#include "dummy.h" + +struct fifo_rq { + struct kthread *k; + uint32_t head, tail; + struct list_head overflow; + uint8_t pad0[32]; + spinlock_t lock; + uint8_t pad1[60]; + /* cache line 2~5 */ + struct task *tasks[RUNTIME_RQ_SIZE]; +} __aligned_cacheline; + +extern __thread struct fifo_rq *this_rq; +extern struct fifo_rq *rqs[USED_CPUS]; + +#define this_rq() (this_rq) +#define cpu_rq(cpu) (rqs[cpu]) + +#define RQ_SIZE_MASK (RUNTIME_RQ_SIZE - 1) +#define RQ_HEAD(rq) ((rq)->tasks[(rq)->head & RQ_SIZE_MASK]) +#define RQ_TAIL(rq) ((rq)->tasks[(rq)->tail & RQ_SIZE_MASK]) +#define RQ_IS_EMPTY(rq) ((rq)->head == (rq)->tail) + +static inline struct task *fifo_sched_pick_next() +{ + struct task *task; + struct fifo_rq *rq = this_rq(); + + if (RQ_IS_EMPTY(rq)) + return NULL; + + task = RQ_HEAD(rq); + rq->head++; + + return task; +} + +static inline void fifo_sched_percpu_lock(int cpu) { spin_lock(&cpu_rq(cpu)->lock); } + +static inline void fifo_sched_percpu_unlock(int cpu) { spin_unlock(&cpu_rq(cpu)->lock); } + +static __always_inline __attribute__((target("general-regs-only"))) bool fifo_sched_preempt() +{ + return true; +} + +#define fifo_sched_init dummy_sched_init +#define fifo_sched_init_task dummy_sched_init_task +#define fifo_sched_finish_task dummy_sched_finish_task +#define fifo_sched_block dummy_sched_block +#define fifo_sched_set_params dummy_sched_set_params +#define fifo_sched_poll dummy_sched_poll +#define fifo_sched_dump_tasks dummy_sched_dump_tasks + +int fifo_sched_init_percpu(void *percpu_data); +int fifo_sched_spawn(struct task *task, int cpu); +void fifo_sched_yield(); +void fifo_sched_wakeup(struct task *task); +void fifo_sched_balance(); diff --git a/include/skyloft/sched/policy/rr.h b/include/skyloft/sched/policy/rr.h new file mode 100644 index 0000000..a4ea3f8 --- /dev/null 
+++ b/include/skyloft/sched/policy/rr.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include +#include +#include + +#include "dummy.h" + +struct fifo_rq { + uint32_t head; + atomic_int tail; + struct kthread *k; + atomic_int num_tasks; + uint8_t pad0[44]; + /* cache line 2~5 */ + struct task *tasks[RUNTIME_RQ_SIZE]; +} __aligned_cacheline; + +struct fifo_task { + int last_run; + int quan; +}; + +#define fifo_task_of(task) ((struct fifo_task *)(task->policy_task_data)) +DECLARE_PERCPU(struct fifo_rq *, rqs); +#define this_rq() percpu_get(rqs) +#define cpu_rq(cpu) percpu_get_remote(rqs, cpu) + +#define RQ_SIZE_MASK (RUNTIME_RQ_SIZE - 1) + +struct task *fifo_sched_pick_next(); +int fifo_sched_init_percpu(void *percpu_data); +int fifo_sched_spawn(struct task *task, int cpu); +void fifo_sched_yield(); +void fifo_sched_wakeup(struct task *task); +bool fifo_sched_preempt(); + +static inline int fifo_sched_init_task(struct task *task) +{ + task->allow_preempt = true; + return 0; +} + +#define fifo_sched_init dummy_sched_init +#define fifo_sched_finish_task dummy_sched_finish_task +#define fifo_sched_block dummy_sched_block +#define fifo_sched_set_params dummy_sched_set_params +#define fifo_sched_poll dummy_sched_poll +#define fifo_sched_dump_tasks dummy_sched_dump_tasks +#define fifo_sched_percpu_lock dummy_sched_percpu_lock +#define fifo_sched_percpu_unlock dummy_sched_percpu_unlock +#define fifo_sched_balance dummy_sched_balance diff --git a/include/skyloft/sched/policy/sq.h b/include/skyloft/sched/policy/sq.h new file mode 100644 index 0000000..21a6d37 --- /dev/null +++ b/include/skyloft/sched/policy/sq.h @@ -0,0 +1,104 @@ +/* + * sq.h: single queue c-FCFS scheduler + */ + +#pragma once + +#include +#include +#include + +#include +#include +#include + +#include "dummy.h" + +#define SQ_UVEC 1 + +enum sq_worker_state { + WORKER_IDLE, + WORKER_QUEUING, + WORKER_RUNNING, + WORKER_PREEMPTED, + WORKER_FINISHED, +}; + +struct sq_worker { + enum sq_worker_state state; + struct task *cur_task; + __nsec start_time; + uint8_t pad0[40]; + int uintr_fd; + int uintr_index; +} __aligned_cacheline; + +struct sq_params { + int num_workers; + int preemption_quantum; +}; + +struct sq_dispatcher { + /* Centric queue */ + queue_t pending_tasks; + /* Maximum number of workers */ + int num_workers; + /* If not set, tasks will run to complete */ + int preemption_quantum; +}; + +DECLARE_PERCPU(struct sq_worker *, workers); + +#define this_worker() percpu_get(workers) +#define cpu_worker(cpu) percpu_get_remote(workers, (cpu)) + +static inline bool sq_sched_preempt() +{ + struct sq_worker *worker = this_worker(); + if (atomic_load_acq(&worker->state) == WORKER_RUNNING) { + atomic_store_rel(&worker->state, WORKER_PREEMPTED); + return true; + } else { + return false; + } +} + +static inline struct task *sq_sched_pick_next() +{ + struct sq_worker *worker = this_worker(); + if (atomic_load_acq(&worker->state) == WORKER_QUEUING) { + worker->start_time = now_ns(); + atomic_store_rel(&worker->state, WORKER_RUNNING); + return worker->cur_task; + } else + return NULL; +} + +static inline void sq_sched_finish_task(struct task *task) +{ + atomic_store_rel(&this_worker()->state, WORKER_FINISHED); +} + +static inline void sq_sched_yield() +{ + struct sq_worker *worker = this_worker(); + if (atomic_load_acq(&worker->state) == WORKER_RUNNING) + atomic_store_rel(&worker->state, WORKER_QUEUING); +} + +void sq_sched_poll(); +int sq_sched_init(void *data); +int sq_sched_init_percpu(void *percpu_data); +int sq_sched_spawn(struct task *task, 
int cpu); +int sq_sched_set_params(void *params); + +#define sq_sched_init_task dummy_sched_init_task +#define sq_sched_block dummy_sched_block +#define sq_sched_wakeup dummy_sched_wakeup +#define sq_sched_percpu_lock dummy_sched_percpu_lock +#define sq_sched_percpu_unlock dummy_sched_percpu_unlock +#define sq_sched_balance dummy_sched_balance +#define sq_sched_dump_tasks dummy_sched_dump_tasks + +#define SCHED_DATA_SIZE (sizeof(struct sq_dispatcher)) +#define SCHED_PERCPU_DATA_SIZE (sizeof(struct sq_worker)) diff --git a/include/skyloft/sched/policy/sq_lcbe.h b/include/skyloft/sched/policy/sq_lcbe.h new file mode 100644 index 0000000..52580d2 --- /dev/null +++ b/include/skyloft/sched/policy/sq_lcbe.h @@ -0,0 +1,125 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +#include "dummy.h" + +#define SQ_UVEC 1 +#define SQ_LC 0 /* LC default app ID */ +#define SQ_BE 1 /* BE default app ID */ + +enum sq_worker_state { + WORKER_IDLE, + WORKER_QUEUING, + WORKER_RUNNING, + WORKER_PREEMPTED, + WORKER_FINISHED, +}; + +struct sq_worker { + enum sq_worker_state state; + struct task *cur_task; + __nsec start_time; + uint8_t pad0[40]; + int uintr_fd; + int uintr_index; +} __aligned_cacheline; + +struct sq_cpu { + struct sq_worker lc; + struct sq_worker be; + bool need_sched; + bool is_lc; +}; + +struct sq_params { + int num_workers; + int preemption_quantum; + int guaranteed_cpus; + int adjust_quantum; + double congestion_thresh; +}; + +struct sq_dispatcher { + /* Centric queue */ + queue_t pending_tasks; + /* Maximum number of workers */ + int num_workers; + /* If not set, tasks will run to complete */ + __nsec preemption_quantum; + /* BE process ID */ + pid_t be_pid; + /* Dispatcher will wait for ready flags */ + bool be_ready[USED_CPUS]; + bool lc_ready[USED_CPUS]; + /* CPUs used by LC. */ + DEFINE_BITMAP(lc_cpus, USED_CPUS); + unsigned int lc_nr_cpus; + /* Guaranteed CPUs used by LC */ + unsigned int lc_guaranteed_cpus; + /* Core allocation */ + __nsec last_adjust; + __nsec adjust_quantum; + /* Congestion threshold */ + double congestion_thresh; +}; + +struct sq_task { + /* Time when task is created and pushed */ + __nsec ingress; + /* Time when task is popped */ + __nsec start; + /* Elapsed running time */ + __nsec active; +}; +BUILD_ASSERT(sizeof(struct sq_task) <= POLICY_TASK_DATA_SIZE); + +DECLARE_PERCPU(struct sq_cpu *, sq_cpus); + +static inline struct sq_cpu *this_sq_cpu() { return percpu_get(sq_cpus); } + +static inline struct sq_worker *this_worker() +{ + struct sq_cpu *cpu = this_sq_cpu(); + return cpu->is_lc ? &cpu->lc : &cpu->be; +} + +static inline struct sq_cpu *sq_cpu(int cpu_id) { return percpu_get_remote(sq_cpus, cpu_id); } + +static inline struct sq_worker *cpu_worker(int cpu_id) +{ + struct sq_cpu *cpu = sq_cpu(cpu_id); + return cpu->is_lc ? 
&cpu->lc : &cpu->be; +} + +static inline void init_worker(struct sq_worker *worker) +{ + worker->cur_task = NULL; + worker->state = WORKER_IDLE; +} + +struct task *sq_sched_pick_next(); +void sq_sched_finish_task(struct task *task); +void sq_sched_poll(); +int sq_sched_init(void *data); +int sq_sched_init_percpu(void *percpu_data); +int sq_sched_spawn(struct task *task, int cpu); +int sq_sched_set_params(void *params); +void sq_sched_dump_tasks(); + +#define sq_sched_init_task dummy_sched_init_task +#define sq_sched_block dummy_sched_block +#define sq_sched_wakeup dummy_sched_wakeup +#define sq_sched_percpu_lock dummy_sched_percpu_lock +#define sq_sched_percpu_unlock dummy_sched_percpu_unlock +#define sq_sched_balance dummy_sched_balance + +#define SCHED_DATA_SIZE (sizeof(struct sq_dispatcher)) +#define SCHED_PERCPU_DATA_SIZE (sizeof(struct sq_worker)) \ No newline at end of file diff --git a/include/skyloft/stat.h b/include/skyloft/stat.h new file mode 100644 index 0000000..a40d66d --- /dev/null +++ b/include/skyloft/stat.h @@ -0,0 +1,86 @@ +#pragma once + +#include + +/* + * These are per-kthread stat counters. It's recommended that most counters be + * monotonically increasing, as that decouples the counters from any particular + * collection time period. However, it may not be possible to represent all + * counters this way. + * + * Don't use these enums directly. Instead, use the STAT() macro. + */ +enum { + /* scheduler counters */ + STAT_LOCAL_SPAWNS = 0, + STAT_SWITCH_TO, + STAT_TASKS_STOLEN, + STAT_IDLE, + STAT_IDLE_CYCLES, + // STAT_WAKEUP, + // STAT_WAKEUP_CYCLES, + STAT_SOFTIRQS_LOCAL, + STAT_SOFTIRQ_CYCLES, + STAT_ALLOC, + STAT_ALLOC_CYCLES, + STAT_RX, + STAT_TX, +#ifdef SKYLOFT_UINTR + STAT_UINTR, + // STAT_UINTR_CYCLES, +#endif + + /* total number of counters */ + STAT_NR, +}; + +static const char *STAT_STR[] = { + "local_spawns", + "switch_to", + "tasks_stolen", + "idle", + "idle_cycles", + // "wakeup", + // "wakeup_cycles", + "softirqs_local", + "softirq_cycles", + "alloc", + "alloc_cycles", + "rx", + "tx", +#ifdef SKYLOFT_UINTR + "uintr", + // "uintr_cycles", +#endif +}; + +BUILD_ASSERT(ARRAY_SIZE(STAT_STR) == STAT_NR); + +static inline const char *stat_str(int idx) +{ + return STAT_STR[idx]; +} + +/** + * STAT - gets a stat counter + * + * e.g. STAT(DROPS)++; + * + * Deliberately could race with preemption. 
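+ *
+ * With the ADD_STAT() macro defined below, a typical increment looks like:
+ *   ADD_STAT(RX, 1);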
+ */ +#define ADD_STAT_FORCE(counter, val) (thisk()->stats[STAT_##counter] += val) +#ifdef SKYLOFT_STAT +#define ADD_STAT(counter, val) ADD_STAT_FORCE(counter, val) +#define STAT_CYCLES_BEGIN(timer) ({ \ + timer = now_tsc(); \ +}) +#define ADD_STAT_CYCLES(counter, timer) ({ \ + ADD_STAT_FORCE(counter, now_tsc() - timer); \ +}) +#else +#define ADD_STAT(counter, val) +#define STAT_CYCLES_BEGIN(timer) ((void)timer) +#define ADD_STAT_CYCLES(counter, timer) +#endif + +void print_stats(void); diff --git a/include/skyloft/sync.h b/include/skyloft/sync.h new file mode 100644 index 0000000..f6abd6c --- /dev/null +++ b/include/skyloft/sync.h @@ -0,0 +1,7 @@ +#pragma once + +#include +#include +#include +#include +#include diff --git a/include/skyloft/sync/rcu.h b/include/skyloft/sync/rcu.h new file mode 100644 index 0000000..8410a06 --- /dev/null +++ b/include/skyloft/sync/rcu.h @@ -0,0 +1,117 @@ +/* + * rcu.h - support for read-copy-update + */ + +#pragma once + +#include +#include +#include +#include + +#ifdef DEBUG +extern __thread int rcu_read_count; +#endif /* DEBUG */ + +static inline bool rcu_read_lock_held(void) +{ +#ifdef DEBUG + return rcu_read_count > 0; +#else /* DEBUG */ + return true; +#endif /* DEBUG */ +} + +static inline void rcu_read_lock(void) +{ + preempt_disable(); +#ifdef DEBUG + rcu_read_count++; +#endif /* DEBUG */ +} + +static inline void rcu_read_unlock(void) +{ +#ifdef DEBUG + assert(rcu_read_lock_held()); + rcu_read_count--; +#endif /* DEBUG */ + preempt_enable(); +} + +#ifdef __CHECKER__ +#define rcu_check_type(p) ((void)(((typeof(*(p)) __rcu *)p) == p)) +#else /* __CHECKER__ */ +#define rcu_check_type(p) +#endif /* __CHECKER__ */ + +#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) + +/** + * RCU_INIT_POINTER - initializes an RCU pointer + * @p: the RCU pointer + * @v: the initialization value + * + * Use this variant at initialization time before the data is shared. Otherwise, + * you must use rcu_assign_pointer(). + */ +#define RCU_INIT_POINTER(p, v) \ + do { \ + rcu_check_type(p); \ + ACCESS_ONCE(p) = RCU_INITIALIZER(v); \ + } while (0) + +/** + * rcu_dereference - dereferences an RCU pointer in an RCU section + * @p: the RCU pointer + * + * Returns the RCU pointer value. + */ +#define rcu_dereference(p) \ + ({ \ + rcu_check_type(p); \ + assert(rcu_read_lock_held()); \ + atomic_load_con((typeof(*(p)) __force **)(&p)); \ + }) + +/** + * rcu_dereference_protected - dereferences an RCU pointer (modify or read) + * @p: the RCU pointer + * @c: a condition proving the access is safe, such as a check of whether a lock + * is held. + * + * An RCU pointer can be safely dereferenced if either the condition @c passes + * or an RCU read lock is held. + * + * TODO: consume barrier isn't needed if 'c' evaluates to true. + * + * Returns the RCU pointer value. 
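+ *
+ * Illustrative update-side usage (sketch; cfg, cfg_lock, new_cfg, and
+ * struct config are placeholder names; true is passed because cfg_lock is
+ * held across the update):
+ *
+ *   spin_lock(&cfg_lock);
+ *   struct config *old = rcu_dereference_protected(cfg, true);
+ *   rcu_assign_pointer(cfg, new_cfg);
+ *   spin_unlock(&cfg_lock);
+ *   ... reclaim old via rcu_free() or after synchronize_rcu() ...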
+ */ +#define rcu_dereference_protected(p, c) \ + ({ \ + rcu_check_type(p); \ + assert(rcu_read_lock_held() || !!(c)); \ + atomic_load_con((typeof(*(p)) __force **)(&p)); \ + }) + +/** + * rcu_assign_pointer - safely assigns a new value to an RCU pointer + * @p: the RCU pointer + * @v: the value to assign + */ +#define rcu_assign_pointer(p, v) \ + do { \ + rcu_check_type(p); \ + atomic_store_rel(&p, RCU_INITIALIZER(v)); \ + } while (0) + +struct rcu_head; +typedef void (*rcu_callback_t)(struct rcu_head *head); + +struct rcu_head { + struct rcu_head *next; + rcu_callback_t func; +}; + +void rcu_free(struct rcu_head *head, rcu_callback_t func); +void synchronize_rcu(void); diff --git a/include/skyloft/sync/rculist.h b/include/skyloft/sync/rculist.h new file mode 100644 index 0000000..b04fe58 --- /dev/null +++ b/include/skyloft/sync/rculist.h @@ -0,0 +1,73 @@ +/* + * rculist.h - support for RCU list data structures + */ + +#pragma once + +#include + +struct rcu_hlist_node { + struct rcu_hlist_node __rcu *next; + struct rcu_hlist_node *__rcu *pprev; +}; + +struct rcu_hlist_head { + struct rcu_hlist_node __rcu *head; +}; + +/** + * rcu_hlist_init_head - initializes an RCU hlist + * @h: the list head + */ +static inline void rcu_hlist_init_head(struct rcu_hlist_head *h) +{ + RCU_INIT_POINTER(h->head, NULL); +} + +/** + * rcu_hlist_add_head - adds a node to the head of an RCU hlist + * @h: the list head + * @n: the node to add + */ +static inline void rcu_hlist_add_head(struct rcu_hlist_head *h, struct rcu_hlist_node *n) +{ + struct rcu_hlist_node *head = h->head; + RCU_INIT_POINTER(n->next, head); + n->pprev = &h->head; + rcu_assign_pointer(h->head, n); + if (head) + head->pprev = &n->next; +} + +/** + * rcu_hlist_del - removes a node from an RCU hlist + * @n: the node to remove + */ +static inline void rcu_hlist_del(struct rcu_hlist_node *n) +{ + rcu_assign_pointer(*n->pprev, n->next); + if (n->next) + n->next->pprev = n->pprev; +} + +/** + * rcu_hlist_empty - returns true if the RCU hlist is empty + * @h: the list head + * @check: proof that a lock is held + * + * If @check is false, must be in an RCU critical section. 
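+ *
+ * Read-side example (sketch):
+ *   rcu_read_lock();
+ *   bool empty = rcu_hlist_empty(&h, false);
+ *   rcu_read_unlock();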
+ */ +static inline bool rcu_hlist_empty(struct rcu_hlist_head *h, bool check) +{ + return rcu_dereference_protected(h->head, check) == NULL; +} + +#define rcu_hlist_entry(n, type, member) container_of(n, type, member) + +#define rcu_hlist_for_each(h, pos, check) \ + for ((pos) = rcu_dereference_protected((h)->head, check); (pos); \ + (pos) = rcu_dereference_protected((pos)->next, check)) + +#define rcu_hlist_for_each_safe(h, pos, tmp, check) \ + for ((pos) = rcu_dereference_protected((h)->head, check); \ + (pos) && ((tmp) = rcu_dereference_protected((pos)->next, check), 1); (pos) = (tmp)) diff --git a/include/skyloft/sync/signal.h b/include/skyloft/sync/signal.h new file mode 100644 index 0000000..68eaa7b --- /dev/null +++ b/include/skyloft/sync/signal.h @@ -0,0 +1,7 @@ +/* + * signal.h - support for signal + */ + +#pragma once + +int signal_init(void); \ No newline at end of file diff --git a/include/skyloft/sync/sync.h b/include/skyloft/sync/sync.h new file mode 100644 index 0000000..12b9567 --- /dev/null +++ b/include/skyloft/sync/sync.h @@ -0,0 +1,143 @@ +/* + * sync.h - support for synchronization + */ + +#pragma once + +#include +#include + +#include +#include + +/* + * Mutex support + */ + +typedef struct { + bool held; + spinlock_t waiter_lock; + struct list_head waiters; +} mutex_t; + +bool mutex_try_lock(mutex_t *m); +void mutex_lock(mutex_t *m); +void mutex_unlock(mutex_t *m); +void mutex_init(mutex_t *m); + +/** + * mutex_held - is the mutex currently held? + * @m: the mutex to check + */ +static inline bool mutex_held(mutex_t *m) { return m->held; } + +/** + * assert_mutex_held - asserts that a mutex is currently held + * @m: the mutex that must be held + */ +static inline void assert_mutex_held(mutex_t *m) { assert(mutex_held(m)); } + +/* + * Condition variable support + */ + +typedef struct { + spinlock_t waiter_lock; + struct list_head waiters; +} condvar_t; + +void condvar_wait(condvar_t *cv, mutex_t *m); +void condvar_signal(condvar_t *cv); +void condvar_broadcast(condvar_t *cv); +void condvar_init(condvar_t *cv); + +/* + * Barrier support + */ + +typedef struct { + spinlock_t lock; + int waiting; + int count; + struct list_head waiters; +} barrier_t; + +void barrier_init(barrier_t *b, int count); +bool barrier_wait(barrier_t *b); + +/* + * Spin lock support + */ + +/** + * spin_lock_np - takes a spin lock and disables preemption + * @l: the spin lock + */ +static inline void spin_lock_np(spinlock_t *l) +{ + preempt_disable(); + spin_lock(l); +} + +/** + * spin_try_lock_np - takes a spin lock if its available and disables preemption + * @l: the spin lock + * + * Returns true if successful, otherwise fail. 
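+ *
+ * Example (sketch):
+ *   if (spin_try_lock_np(&l)) {
+ *       ... critical section ...
+ *       spin_unlock_np(&l);
+ *   }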
+ */ +static inline bool spin_try_lock_np(spinlock_t *l) +{ + preempt_disable(); + if (spin_try_lock(l)) + return true; + + preempt_enable(); + return false; +} + +/** + * spin_unlock_np - releases a spin lock and re-enables preemption + * @l: the spin lock + */ +static inline void spin_unlock_np(spinlock_t *l) +{ + spin_unlock(l); + preempt_enable(); +} + +#define spin_lock_irqsave(lock, flags) \ + do { \ + local_irq_save(flags); \ + spin_lock(lock); \ + } while (0) + +#define spin_unlock_irqrestore(lock, flags) \ + do { \ + spin_unlock(lock); \ + local_irq_restore(flags); \ + } while (0) + +/* + * Wait group support + */ + +struct waitgroup { + spinlock_t lock; + int cnt; + struct list_head waiters; +}; + +typedef struct waitgroup waitgroup_t; + +void waitgroup_add(waitgroup_t *wg, int cnt); +void waitgroup_wait(waitgroup_t *wg); +void waitgroup_init(waitgroup_t *wg); + +/** + * waitgroup_done - notifies the wait group that one waiting event completed + * @wg: the wait group to complete + */ +static inline void waitgroup_done(waitgroup_t *wg) { waitgroup_add(wg, -1); } + +int futex_wait(int *uaddr, int val); +int futex_wake(int *uaddr, int val); diff --git a/include/skyloft/sync/timer.h b/include/skyloft/sync/timer.h new file mode 100644 index 0000000..718cb91 --- /dev/null +++ b/include/skyloft/sync/timer.h @@ -0,0 +1,62 @@ +/* + * timer.h - support for timers + */ + +#pragma once + +#include +#include + +typedef void (*timer_fn_t)(uint64_t arg); + +struct timer_entry { + bool armed; + uint32_t idx; + timer_fn_t fn; + uint64_t arg; + struct kthread *k; +}; + +/** + * timer_init - initializes a timer + * @e: the timer entry to initialize + * @fn: the timer handler (called when the timer fires) + * @arg: an argument passed to the timer handler + */ +static inline void timer_init(struct timer_entry *e, timer_fn_t fn, unsigned long arg) +{ + e->armed = false; + e->fn = fn; + e->arg = arg; +} + +void timer_start(struct timer_entry *e, uint64_t deadline_us); +bool timer_cancel(struct timer_entry *e); + +/* + * High-level API + */ + +void timer_sleep_until(uint64_t deadline_us); +void timer_sleep(uint64_t duration_us); + +struct timer_idx { + uint64_t deadline_us; + struct timer_entry *e; +}; + +void timer_softirq(struct kthread *k, unsigned int budget); +void timer_merge(struct kthread *r); +uint64_t timer_earliest_deadline(void); + +/** + * timer_needed - returns true if pending timers have to be handled + * @k: the kthread to check + */ +static inline bool timer_needed(struct kthread *k) +{ + /* deliberate race condition */ + return k->nr_timers > 0 && k->timers[0].deadline_us <= now_us(); +} + +int timer_init_percpu(void); \ No newline at end of file diff --git a/include/skyloft/task.h b/include/skyloft/task.h new file mode 100644 index 0000000..d4f1475 --- /dev/null +++ b/include/skyloft/task.h @@ -0,0 +1,79 @@ +/* + * task.h: support for user-level tasks + */ + +#pragma once + +#include + +#include +#include + +enum task_state { + TASK_IDLE, + TASK_RUNNABLE, + TASK_BLOCKED, +}; + +struct switch_frame { + /* callee saved registers */ + uint64_t rbx; + uint64_t rbp; + uint64_t r12; + uint64_t r13; + uint64_t r14; + uint64_t r15; + uint64_t rdi; /* first argument */ + uint64_t rip; +}; +struct stack { + union { + void *ptr; + uint8_t payload[RUNTIME_STACK_SIZE]; + }; +}; + +BUILD_ASSERT(sizeof(struct stack) == RUNTIME_STACK_SIZE); + +static inline uint64_t stack_top(struct stack *stack) +{ + return (uint64_t)stack + RUNTIME_STACK_SIZE; +} + +typedef void (*thread_fn_t)(void *arg); + +struct 
task { + /* cache line 0 */ + struct list_node link; + struct stack *stack; + enum task_state state; + int id, app_id; + uint8_t stack_busy; + bool allow_preempt; + bool skip_free; + uint64_t rsp; + uint8_t pad0[16]; + /* cache line 1~2 */ + uint8_t policy_task_data[POLICY_TASK_DATA_SIZE]; +} __aligned_cacheline; + +#define task_is_idle(t) ((t)->state == TASK_IDLE) +#define task_is_runnable(t) ((t)->state == TASK_RUNNABLE) +#define task_is_blocked(t) ((t)->state == TASK_BLOCKED) +#define task_is_dead(t) ((t)->state == TASK_DEAD) + + +struct task *task_create(thread_fn_t fn, void *arg); +struct task *task_create_with_buf(thread_fn_t fn, void **buf, size_t buf_len); +struct task *task_create_idle(); +void task_free(struct task *task); + +int sched_task_init(void *base); +int sched_task_init_percpu(void); + +#ifdef SCHED_PERCPU +#define TASK_SIZE_PER_APP (align_up(sizeof(struct task) * MAX_TASKS_PER_APP, PGSIZE_2MB)) +#else +#define TASK_SIZE_PER_APP \ + (align_up((sizeof(struct task) + sizeof(struct stack)) * MAX_TASKS_PER_APP, PGSIZE_2MB)) +#endif \ No newline at end of file diff --git a/include/skyloft/tcp.h b/include/skyloft/tcp.h new file mode 100644 index 0000000..8de6436 --- /dev/null +++ b/include/skyloft/tcp.h @@ -0,0 +1,30 @@ +/* + * tcp.h - TCP sockets + */ + +#pragma once + +#include +#include + +#include + +struct tcp_queue; +typedef struct tcp_queue tcp_queue_t; +struct tcp_conn; +typedef struct tcp_conn tcp_conn_t; + +int tcp_dial(struct netaddr laddr, struct netaddr raddr, tcp_conn_t **c_out); +int tcp_listen(struct netaddr laddr, int backlog, tcp_queue_t **q_out); +int tcp_accept(tcp_queue_t *q, tcp_conn_t **c_out); +void tcp_qshutdown(tcp_queue_t *q); +void tcp_qclose(tcp_queue_t *q); +struct netaddr tcp_local_addr(tcp_conn_t *c); +struct netaddr tcp_remote_addr(tcp_conn_t *c); +ssize_t tcp_read(tcp_conn_t *c, void *buf, size_t len); +ssize_t tcp_write(tcp_conn_t *c, const void *buf, size_t len); +ssize_t tcp_readv(tcp_conn_t *c, const struct iovec *iov, int iovcnt); +ssize_t tcp_writev(tcp_conn_t *c, const struct iovec *iov, int iovcnt); +int tcp_shutdown(tcp_conn_t *c, int how); +void tcp_abort(tcp_conn_t *c); +void tcp_close(tcp_conn_t *c); diff --git a/include/skyloft/uapi/dev.h b/include/skyloft/uapi/dev.h new file mode 100644 index 0000000..fcfbb81 --- /dev/null +++ b/include/skyloft/uapi/dev.h @@ -0,0 +1,18 @@ +#ifndef _SKYLOFT_UAPI_DEV_H_ +#define _SKYLOFT_UAPI_DEV_H_ + +#define SKYLOFT_IO_BASE 0x1000 +#define SKYLOFT_IO_PARK _IO(SKYLOFT_IO_BASE, 1) +#define SKYLOFT_IO_WAKEUP _IO(SKYLOFT_IO_BASE, 2) +#define SKYLOFT_IO_SWITCH_TO _IO(SKYLOFT_IO_BASE, 3) +#define SKYLOFT_IO_SETUP_DEVICE_UINTR _IO(SKYLOFT_IO_BASE, 4) +#define SKYLOFT_IO_TIMER_SET_HZ _IO(SKYLOFT_IO_BASE, 5) +#define SKYLOFT_IO_IPI_SEND _IO(SKYLOFT_IO_BASE, 6) +#define SKYLOFT_IO_IPI_BENCH _IO(SKYLOFT_IO_BASE, 7) + +#define SKYLOFT_SETUP_DEVICE_UINTR_FLAGS_TIMER 0x1 + +#define SKYLOFT_DEV_NAME "skyloft" +#define SKYLOFT_DEV_PATH "/dev/" SKYLOFT_DEV_NAME + +#endif // _SKYLOFT_UAPI_DEV_H_ diff --git a/include/skyloft/uapi/pthread.h b/include/skyloft/uapi/pthread.h new file mode 100644 index 0000000..bf14dac --- /dev/null +++ b/include/skyloft/uapi/pthread.h @@ -0,0 +1,41 @@ +/* + * pthread.h - support for pthread-like APIs + */ + +#ifndef _SKYLOFT_UAPI_PTHREAD_H_ +#define _SKYLOFT_UAPI_PTHREAD_H_ + +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +int sl_pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*fn)(void *), + void *arg); +int sl_pthread_join(pthread_t thread, 
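+/*
+ * Illustrative use of the TCP API declared in skyloft/tcp.h (a sketch, not
+ * part of this patch; the caller is assumed to fill in struct netaddr):
+ *
+ *   tcp_queue_t *q;
+ *   tcp_conn_t *c;
+ *   struct netaddr laddr = { 0 };     // local IP and port go here
+ *   if (tcp_listen(laddr, 64, &q) == 0 && tcp_accept(q, &c) == 0) {
+ *       char buf[512];
+ *       ssize_t n = tcp_read(c, buf, sizeof(buf));
+ *       if (n > 0)
+ *           tcp_write(c, buf, n);     // echo the payload back
+ *       tcp_close(c);
+ *   }
+ */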
void **retval); +int sl_pthread_detach(pthread_t thread); +int sl_pthread_yield(void); +pthread_t sl_pthread_self(); +void __attribute__((noreturn)) sl_pthread_exit(void *retval); + +int sl_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutexattr); +int sl_pthread_mutex_lock(pthread_mutex_t *mutex); +int sl_pthread_mutex_trylock(pthread_mutex_t *mutex); +int sl_pthread_mutex_unlock(pthread_mutex_t *mutex); +int sl_pthread_mutex_destroy(pthread_mutex_t *mutex); + +int sl_pthread_cond_init(pthread_cond_t *__restrict cond, + const pthread_condattr_t *__restrict cond_attr); +int sl_pthread_cond_signal(pthread_cond_t *cond); +int sl_pthread_cond_broadcast(pthread_cond_t *cond); +int sl_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex); +int sl_pthread_cond_destroy(pthread_cond_t *cond); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/skyloft/uapi/task.h b/include/skyloft/uapi/task.h new file mode 100644 index 0000000..3648669 --- /dev/null +++ b/include/skyloft/uapi/task.h @@ -0,0 +1,71 @@ +/* + * task.h - support for skyloft custom APIs + */ + +#ifndef _SKYLOFT_UAPI_TASK_H_ +#define _SKYLOFT_UAPI_TASK_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define __api + +typedef void (*thread_fn_t)(void *arg); +typedef int (*initializer_fn_t)(void); + +static inline int __api sl_current_app_id() +{ + extern int g_app_id; + return g_app_id; +} + +static inline int __api sl_current_cpu_id() +{ + extern __thread int g_logic_cpu_id; + return g_logic_cpu_id; +} + +int __api sl_current_task_id(); + +int __api sl_task_spawn(thread_fn_t fn, void *arg, int stack_size); +int __api sl_task_spawn_oncpu(int cpu_id, thread_fn_t fn, void *arg, int stack_size); +void __api sl_task_yield(); +void __attribute__((noreturn)) __api sl_task_exit(int code); + +const char *__api sl_sched_policy_name(); +int __api sl_sched_set_params(void *params); +void __api sl_sched_poll(); + +int __api sl_set_initializers(initializer_fn_t global, initializer_fn_t percpu, + initializer_fn_t late); +int __api sl_libos_start(thread_fn_t app_main, void *arg); +int __api sl_libos_start_daemon(); + +void __api sl_dump_tasks(); + +void __api sl_sleep(int secs); +void __api sl_usleep(int usecs); + +#define FUTEX_WAIT 0 +#define FUTEX_WAKE 1 + +#define FUTEX_PRIVATE_FLAG 128 +#define FUTEX_CLOCK_REALTIME 256 +#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME) + +#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) +#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG) + +struct timespec; + +int __api sl_futex(int *uaddr, int op, int val, const struct timespec *timeout, int *uaddr2, + int val3); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/skyloft/udp.h b/include/skyloft/udp.h new file mode 100644 index 0000000..64f5193 --- /dev/null +++ b/include/skyloft/udp.h @@ -0,0 +1,74 @@ +/* + * udp.h - UDP sockets + */ + +#pragma once + +#include +#include + +#include + +/* the maximum size of a UDP payload */ +#define UDP_MAX_PAYLOAD 1472 + +/* + * UDP Socket API + */ + +struct udp_conn; +typedef struct udp_conn udp_conn_t; + +int udp_dial(struct netaddr laddr, struct netaddr raddr, udp_conn_t **c_out); +int udp_listen(struct netaddr laddr, udp_conn_t **c_out); +struct netaddr udp_local_addr(udp_conn_t *c); +struct netaddr udp_remote_addr(udp_conn_t *c); +int udp_set_buffers(udp_conn_t *c, int read_mbufs, int write_mbufs); +ssize_t udp_read_from(udp_conn_t *c, void *buf, size_t len, struct netaddr *raddr); +ssize_t udp_write_to(udp_conn_t 
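+/*
+ * Illustrative application entry point built on the uapi above (a sketch,
+ * not part of this patch; worker/app_main are hypothetical names and a
+ * stack_size of 0 is assumed to request the default):
+ *
+ *   static void worker(void *arg) { sl_task_exit(0); }
+ *
+ *   static void app_main(void *arg)
+ *   {
+ *       for (int i = 0; i < 4; i++)
+ *           sl_task_spawn(worker, NULL, 0);
+ *       sl_task_yield();
+ *   }
+ *
+ *   int main(void) { return sl_libos_start(app_main, NULL); }
+ */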
*c, const void *buf, size_t len, const struct netaddr *raddr); +ssize_t udp_read(udp_conn_t *c, void *buf, size_t len); +ssize_t udp_write(udp_conn_t *c, const void *buf, size_t len); +void udp_shutdown(udp_conn_t *c); +void udp_close(udp_conn_t *c); + +/* + * UDP Parallel API + */ + +struct udp_spawner; +typedef struct udp_spawner udp_spawner_t; +typedef struct udp_spawn_data udp_spawn_data_t; + +struct udp_spawn_data { + const void *buf; + size_t len; + struct netaddr laddr; + struct netaddr raddr; + void *release_data; +}; + +typedef void (*udpspawn_fn_t)(struct udp_spawn_data *d); + +int udp_create_spawner(struct netaddr laddr, udpspawn_fn_t fn, udp_spawner_t **s_out); +void udp_destroy_spawner(udp_spawner_t *s); +ssize_t udp_send(const void *buf, size_t len, struct netaddr laddr, struct netaddr raddr); +ssize_t udp_sendv(const struct iovec *iov, int iovcnt, struct netaddr laddr, struct netaddr raddr); +void udp_spawn_data_release(void *release_data); + +/** + * udp_respond - sends a response datagram to a spawner datagram + * @buf: a buffer containing the datagram + * @len: the length of the datagram + * @d: the UDP spawner data + * + * Returns @len if successful, otherwise fail. + */ +static inline ssize_t udp_respond(const void *buf, size_t len, struct udp_spawn_data *d) +{ + return udp_send(buf, len, d->laddr, d->raddr); +} + +static inline ssize_t udp_respondv(const struct iovec *iov, int iovcnt, struct udp_spawn_data *d) +{ + return udp_sendv(iov, iovcnt, d->laddr, d->raddr); +} diff --git a/kmod/Makefile b/kmod/Makefile new file mode 100644 index 0000000..5637cc8 --- /dev/null +++ b/kmod/Makefile @@ -0,0 +1,35 @@ +module := skyloft + +CPU_LIST ?= 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21 + +KDIR ?= /lib/modules/$(shell uname -r)/build +DEBUG ?= +UINTR ?= 1 + +KBUILD_CFLAGS += -I$(PWD)/../include + +ifneq ($(DEBUG),) + KBUILD_CFLAGS += -DDEBUG +endif +ifneq ($(UINTR),) + KBUILD_CFLAGS += -DSKYLOFT_UINTR +endif + +obj-m := $(module).o + +$(module)-y := main.o + +all: + make -C $(KDIR) M=$(PWD) modules + +clean: + make -C $(KDIR) M=$(PWD) clean + +insmod: rmmod + sudo insmod $(module).ko cpu_list=$(CPU_LIST) + sudo chmod 666 /dev/$(module) + +rmmod: +ifneq ($(shell lsmod | grep $(module)),) + sudo rmmod $(module) +endif diff --git a/kmod/main.c b/kmod/main.c new file mode 100644 index 0000000..e013544 --- /dev/null +++ b/kmod/main.c @@ -0,0 +1,319 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define UINTR_ENABLE (defined(CONFIG_X86_USER_INTERRUPTS) && defined(SKYLOFT_UINTR)) + +#if UINTR_ENABLE +#include +#include +#endif + +#include + +#define MAX_CPUS 128 + +static int cpu_list_len; +static uint cpu_list[MAX_CPUS]; +module_param_array(cpu_list, uint, &cpu_list_len, S_IRUGO); + +static struct cpumask skyloft_cpu_mask; + +static inline uint64_t now_tsc(void) +{ + uint32_t lo, hi; + asm volatile("rdtscp" : "=a"(lo), "=d"(hi)::"rcx"); + return ((uint64_t)hi << 32) | lo; +} + +static int skyloft_open(struct inode *inode, struct file *file) +{ + pr_info("skyloft: skyloft_open\n"); + return 0; +} + +static int skyloft_release(struct inode *inode, struct file *file) +{ + pr_info("skyloft: skyloft_release\n"); + return 0; +} + +#if UINTR_ENABLE +static int skyloft_setup_device_uintr(int flags) +{ + pr_info("skyloft: skyloft_setup_device_uintr on CPU %d, flags=%d\n", smp_processor_id(), flags); + + if (!current->thread.upid_activated) { + pr_warn("skyloft: not a user interrupt receiver\n"); + 
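+/*
+ * A plausible usage of the UDP spawner API declared in skyloft/udp.h (a
+ * sketch, not part of this patch; laddr is assumed to be set up by the
+ * caller):
+ *
+ *   static void echo_handler(struct udp_spawn_data *d)
+ *   {
+ *       udp_respond(d->buf, d->len, d);          // reply to the sender
+ *       udp_spawn_data_release(d->release_data); // release the datagram
+ *   }
+ *
+ *   udp_spawner_t *s;
+ *   udp_create_spawner(laddr, echo_handler, &s);
+ */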
return -EINVAL; + } + + current->thread.upid_ctx->suppressed = true; + current->thread.upid_ctx->timer_uintr = !!(flags & SKYLOFT_SETUP_DEVICE_UINTR_FLAGS_TIMER); + return 0; +} + +static int skyloft_ipi_send(int cpu) +{ + unsigned long flags; + pr_debug_ratelimited("skyloft: send IPI from CPU %d to %d\n", smp_processor_id(), cpu); + + local_irq_save(flags); + apic->send_IPI(cpu, UINTR_SKYLOFT_VECTOR); + local_irq_restore(flags); + return 0; +} + +#define APIC_TIMER_PERIOD 25000000 +#define APIC_DIVISOR 1 + +int skyloft_timer_set_hz(u32 hz) +{ + unsigned long flags; + u32 clock = APIC_TIMER_PERIOD / hz / APIC_DIVISOR; + u32 actual_hz = APIC_TIMER_PERIOD / APIC_DIVISOR / clock; + + if (!is_skyloft_enabled()) { + pr_warn("skyloft: skyloft is not enabled on CPU %d\n", smp_processor_id()); + return -EINVAL; + } + + if (clock == 0) { + pr_warn("skyloft: too high timer frequency\n"); + return -EINVAL; + } + + pr_info("skyloft: set timer frequency to %d Hz (TMICT=%d, actual=%d)\n", hz, clock, actual_hz); + + local_irq_save(flags); + apic_write(APIC_LVTT, APIC_LVT_TIMER_PERIODIC | UINTR_SKYLOFT_VECTOR); + apic_write(APIC_TDCR, APIC_TDR_DIV_1); + apic_write(APIC_TMICT, clock); + pr_info("skyloft: APIC_TMICT 0x%x APIC_TMCCT 0x%x APIC_TMDCR 0x%x APIC_LVTT 0x%x\n", + apic_read(APIC_TMICT), apic_read(APIC_TMCCT), apic_read(APIC_TDCR), + apic_read(APIC_LVTT)); + local_irq_restore(flags); + return 0; +} + +#define IPI_SEND_COUNT 10000000 + +static int skyloft_ipi_bench(int cpu) +{ + uint64_t start, end; + int count = IPI_SEND_COUNT; + int ipi_recv_count = per_cpu(irq_stat, cpu).skyloft_timer_spurious_count; + + pr_info("skyloft: bench IPI send from CPU %d to %d\n", smp_processor_id(), cpu); + + start = now_tsc(); + while (count--) { + apic->send_IPI(cpu, UINTR_SKYLOFT_VECTOR); + } + end = now_tsc(); + + ipi_recv_count = per_cpu(irq_stat, cpu).skyloft_timer_spurious_count - ipi_recv_count; + pr_info("skyloft: skyloft_ipi_bench: total=%lld, avg=%lld (cycles), ipi_recv_cnt = %d\n", end - start, + (end - start) / IPI_SEND_COUNT, ipi_recv_count); + return 0; +} +#endif + +static struct task_struct *ksched_lookup_task(pid_t pid) +{ + return pid_task(find_vpid(pid), PIDTYPE_PID); +} + +static int skyloft_park_on_cpu(int cpu_id) +{ + int ret = 0; + pr_info("skyloft: skyloft_park_on_cpu %d: %d -> %d\n", current->pid, smp_processor_id(), + cpu_id); + + if (cpu_id >= 0) { + if (cpu_id >= num_possible_cpus() || !cpumask_test_cpu(cpu_id, &skyloft_cpu_mask)) { + pr_warn("skyloft: CPU %d is not bound to skyloft\n", cpu_id); + ret = -EBUSY; + goto out; + } + + ret = set_cpus_allowed_ptr(current, cpumask_of(cpu_id)); + if (ret) { + pr_warn("skyloft: failed to bind to CPU %d\n", cpu_id); + goto out; + } + } + + if (task_is_running(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + } + +out: + schedule(); + return ret; +} + +static int skyloft_wakeup(pid_t pid) +{ + struct task_struct *p; + + rcu_read_lock(); + p = ksched_lookup_task(pid); + + if (!p) { + rcu_read_unlock(); + return -ESRCH; + } + + if (task_is_running(p)) { + rcu_read_unlock(); + return -EBUSY; + } + + wake_up_process(p); + rcu_read_unlock(); + return 0; +} + +static int skyloft_switch_to(pid_t target_tid) +{ + int err; + unsigned long flags; + + local_irq_save(flags); + + if (target_tid > 0) { + if ((err = skyloft_wakeup(target_tid))) { + local_irq_restore(flags); + return err; + } + } + + __set_current_state(TASK_INTERRUPTIBLE); + local_irq_restore(flags); + schedule(); + return 0; +} + +static void skyloft_bind_cpu(void *info) +{ +#if UINTR_ENABLE 
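+/*
+ * Hypothetical user-space view of the park/wakeup/switch_to handlers above
+ * (a sketch, not part of this patch):
+ *
+ *   int fd = open(SKYLOFT_DEV_PATH, O_RDWR);
+ *   ioctl(fd, SKYLOFT_IO_PARK, cpu_id);      // pin to cpu_id and sleep
+ *   ...
+ *   ioctl(fd, SKYLOFT_IO_WAKEUP, tid);       // another thread wakes tid up
+ *   ioctl(fd, SKYLOFT_IO_SWITCH_TO, tid);    // sleep and hand the CPU to tid
+ */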
+ pr_info("skyloft: bind on CPU %d\n", smp_processor_id()); + if (skyloft_enable()) { + pr_warn("skyloft: failed to bind on CPU %d\n", smp_processor_id()); + return; + } +#endif +} + +static void skyloft_unbind_cpu(void *info) +{ +#if UINTR_ENABLE + pr_info("skyloft: unbind on CPU %d\n", smp_processor_id()); + if (skyloft_disable()) { + pr_warn("skyloft: failed to unbind on CPU %d\n", smp_processor_id()); + return; + } +#endif +} + +static long skyloft_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case SKYLOFT_IO_PARK: + return skyloft_park_on_cpu(arg); + case SKYLOFT_IO_WAKEUP: + return skyloft_wakeup(arg); + case SKYLOFT_IO_SWITCH_TO: + return skyloft_switch_to(arg); +#if UINTR_ENABLE + case SKYLOFT_IO_IPI_BENCH: + return skyloft_ipi_bench(arg); + case SKYLOFT_IO_SETUP_DEVICE_UINTR: + return skyloft_setup_device_uintr(arg); + case SKYLOFT_IO_TIMER_SET_HZ: + return skyloft_timer_set_hz((u32)arg); + case SKYLOFT_IO_IPI_SEND: + return skyloft_ipi_send(arg); +#endif + default: + return -EINVAL; + } +} + +static const struct file_operations skyloft_fops = { + .owner = THIS_MODULE, + .open = skyloft_open, + .release = skyloft_release, + .unlocked_ioctl = skyloft_ioctl, +}; + +struct miscdevice skyloft_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = SKYLOFT_DEV_NAME, + .fops = &skyloft_fops, +}; + +static int __init skyloft_init(void) +{ + int err; + + if (cpu_list_len == 0) { + pr_err("skyloft: parameter `cpu_list` is empty\n"); + return -EINVAL; + } + + for (int i = 0; i < cpu_list_len; i++) { + if (cpu_list[i] >= num_possible_cpus()) { + pr_err("skyloft: invalid cpu id %d\n", cpu_list[i]); + return -EINVAL; + } + if (housekeeping_cpu(cpu_list[i], HK_TYPE_DOMAIN)) { + pr_err("skyloft: cpu %d is not isolated\n", cpu_list[i]); + return -EINVAL; + } + cpumask_set_cpu(cpu_list[i], &skyloft_cpu_mask); + } + + err = misc_register(&skyloft_device); + if (err) { + pr_err("skyloft: cannot register misc device\n"); + return err; + } + + preempt_disable(); + on_each_cpu_mask(&skyloft_cpu_mask, skyloft_bind_cpu, NULL, 1); + preempt_enable(); + + pr_info("skyloft: module initialized\n"); + return 0; +} + +static void __exit skyloft_exit(void) +{ + misc_deregister(&skyloft_device); + + preempt_disable(); + on_each_cpu_mask(&skyloft_cpu_mask, skyloft_unbind_cpu, NULL, 1); + preempt_enable(); + + pr_info("skyloft: module exited\n"); +} + +module_init(skyloft_init); +module_exit(skyloft_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yuekai Jia"); +MODULE_DESCRIPTION("Skyloft kernel module."); +MODULE_VERSION("0.01"); diff --git a/libos/CMakeLists.txt b/libos/CMakeLists.txt new file mode 100644 index 0000000..ccda3a5 --- /dev/null +++ b/libos/CMakeLists.txt @@ -0,0 +1,69 @@ +enable_language(C ASM) + +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/../params.h.in + ${CMAKE_CURRENT_SOURCE_DIR}/../include/skyloft/uapi/params.h + COPYONLY +) +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/../params.h.in + ${CMAKE_CURRENT_SOURCE_DIR}/../include/skyloft/params.h + COPYONLY +) + +aux_source_directory(sched SCHED_SRCS) +aux_source_directory(platform PLATFORM_SRCS) +aux_source_directory(mm MM_SRCS) +aux_source_directory(net NET_SRCS) +aux_source_directory(shim SHIM_SRCS) +aux_source_directory(sync SYNC_SRCS) +aux_source_directory(io IO_SRCS) +set(TOP_SRCS libos.c percpu.c exit.c stat.c) + +add_definitions(-D_GNU_SOURCE) + +if(SIGNAL) + add_definitions(-DSKYLOFT_SIGNAL_SWITCH) +endif() + +if(TIMER) + add_definitions(-DSKYLOFT_TIMER) +endif() + +if(UINTR) + 
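+    # Example configure invocation (illustrative; the option names match the
+    # checks in this file, the chosen values are hypothetical):
+    #   cmake -B build -DUINTR=ON -DSCHED_POLICY=fifo -DDPDK=ON
+    #   cmake --build build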
add_definitions(-DSKYLOFT_TIMER) + add_definitions(-DSKYLOFT_UINTR) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -muintr") +endif() + +if(SCHED_POLICY) + set(SCHED_POLICY_SRC sched/policy/${SCHED_POLICY}.c) +endif() + +set(SCHED_SRCS ${SCHED_SRCS} ${SCHED_POLICY_SRC} sched/switch.S) +set(ALL_SRCS ${TOP_SRCS} ${SCHED_SRCS} ${PLATFORM_SRCS} ${MM_SRCS} ${SYNC_SRCS} ${NET_SRCS}) + +if(DPDK) + set(ALL_SRCS ${ALL_SRCS} ${IO_SRCS}) +endif() + +add_library(skyloft ${ALL_SRCS}) +target_link_libraries(skyloft utils pthread rt numa) +target_include_directories(skyloft PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../include) + +if(DPDK) + find_package(PkgConfig REQUIRED) + pkg_check_modules(LIBDPDK REQUIRED libdpdk) + target_compile_options(skyloft PRIVATE ${LIBDPDK_CFLAGS}) + target_link_libraries(skyloft ${LIBDPDK_LDFLAGS}) + add_definitions(-DSKYLOFT_DPDK) +endif() + +add_library(shim ${SHIM_SRCS}) +target_link_libraries(shim dl utils skyloft) + +# Install static libraries, linker scripts and public headers +install(TARGETS skyloft utils shim ARCHIVE DESTINATION lib LIBRARY DESTINATION lib) +install(FILES libos.ld DESTINATION lib) +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../include/ DESTINATION include) +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../utils/include/ DESTINATION include) diff --git a/libos/exit.c b/libos/exit.c new file mode 100644 index 0000000..1c0762c --- /dev/null +++ b/libos/exit.c @@ -0,0 +1,50 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +void __real_exit(int code); + +void __wrap_exit(int code) +{ + int i, target_tid; + + if (!shm_apps || !current_app()) + goto real_exit; + + if (atomic_flag_test_and_set(¤t_app()->exited)) + return; + + log_info("Exiting libos ..."); + +#ifdef SKYLOFT_STAT + print_stats(); +#endif + + spin_lock(&shm_metadata->lock); + shm_metadata->nr_apps--; + spin_unlock(&shm_metadata->lock); + + /* bind to other CPUs that enable timer interrupts to make wakeup and exit successful */ + unbind_cpus(0); + + if (!is_daemon()) { + for (i = 0; i < USED_CPUS; i++) { + target_tid = shm_apps[DAEMON_APP_ID].all_ks[i].tid; + atomic_store(&shm_metadata->apps[i], DAEMON_APP_ID); + skyloft_wakeup(target_tid); + } + } else if (shm_metadata->nr_apps == 0) { + unlink(SHM_META_PATH); + unlink(SHM_APPS_PATH); + } else { + log_warn("Daemon should not exit"); + } + +real_exit: + __real_exit(code); +} diff --git a/libos/io/cmd.c b/libos/io/cmd.c new file mode 100644 index 0000000..3b593c3 --- /dev/null +++ b/libos/io/cmd.c @@ -0,0 +1,61 @@ +/* + * commands.c - dataplane commands to/from runtimes + */ + +#include + +#include +#include +#include +#include + +static int cmd_drain_queue(struct kthread *t, struct rte_mbuf **bufs, int n) +{ + int i, n_bufs = 0; + + for (i = 0; i < n; i++) { + uint64_t cmd; + unsigned long payload; + + if (!lrpc_recv(&t->txcmdq, &cmd, &payload)) + break; + + switch (cmd) { + case TXCMD_NET_COMPLETE: + bufs[n_bufs++] = (struct rte_mbuf *)payload; + break; + + default: + /* kill the runtime? */ + BUG(); + } + } + + return n_bufs; +} + +/* + * Process a batch of commands from runtimes. + */ +bool cmd_rx_burst(void) +{ + struct rte_mbuf *bufs[IO_CMD_BURST_SIZE]; + int i, n_bufs = 0, idx; + static unsigned int pos = 0; + + /* + * Poll each thread in each runtime until all have been polled or we + * have processed CMD_BURST_SIZE commands. 
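+ *
+ * The runtime side is expected to pair with this loop by pushing completion
+ * notifications onto its command queue, e.g. (illustrative, not part of
+ * this patch):
+ *
+ *   lrpc_send(&k->txcmdq, TXCMD_NET_COMPLETE, (unsigned long)mbuf);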
+ */ + for (i = 0; i < proc->nr_ks; i++) { + idx = (pos + i) % proc->nr_ks; + + if (n_bufs >= IO_CMD_BURST_SIZE) + break; + n_bufs += cmd_drain_queue(&proc->all_ks[idx], &bufs[n_bufs], IO_CMD_BURST_SIZE - n_bufs); + } + + pos++; + for (i = 0; i < n_bufs; i++) rte_pktmbuf_free(bufs[i]); + return n_bufs > 0; +} diff --git a/libos/io/completion.c b/libos/io/completion.c new file mode 100644 index 0000000..ce51806 --- /dev/null +++ b/libos/io/completion.c @@ -0,0 +1,94 @@ +/* + * mempool_completion.c - a single producer, single consumer mempool that sends + * completion events when tx buffers can be freed. Based on rte_mempool_stack.c. + */ + +#include +#include + +#include +#include + +struct completion_stack { + uint32_t size; + uint32_t len; + void *objs[]; +}; + +static int completion_enqueue(struct rte_mempool *mp, void *const *obj_table, unsigned n) +{ + unsigned long i; + struct completion_stack *s = mp->pool_data; + + if (unlikely(s->len + n > s->size)) + return -ENOBUFS; + + for (i = 0; i < n; i++) { + /* Give up on notifying the runtime if this returns false. */ + tx_send_completion(obj_table[i]); + } + +#pragma GCC ivdep + for (i = 0; i < n; i++) { + s->objs[s->len + i] = obj_table[i]; + } + + s->len += n; + return 0; +} + +static int completion_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) +{ + unsigned long i, j; + struct completion_stack *s = mp->pool_data; + if (unlikely(n > s->len)) + return -ENOBUFS; + + s->len -= n; +#pragma GCC ivdep + for (i = 0, j = s->len; i < n; i++, j++) { + obj_table[i] = s->objs[j]; + } + + return 0; +} + +static unsigned completion_get_count(const struct rte_mempool *mp) +{ + struct completion_stack *s = mp->pool_data; + return s->len; +} + +static int completion_alloc(struct rte_mempool *mp) +{ + struct completion_stack *s; + unsigned n = mp->size; + int size = sizeof(*s) + (n + 16) * sizeof(void *); + s = rte_zmalloc_socket(mp->name, size, RTE_CACHE_LINE_SIZE, mp->socket_id); + if (!s) { + log_err("Could not allocate stack"); + return -ENOMEM; + } + + s->len = 0; + s->size = n; + mp->pool_data = s; + return 0; +} + +static void completion_free(struct rte_mempool *mp) +{ + rte_free(mp->pool_data); +} + +/* + * Dummy mempool that sends completion events on enqueue and does nothing else. + */ +struct rte_mempool_ops ops_completion = { + .name = "completion", + .alloc = completion_alloc, + .free = completion_free, + .enqueue = completion_enqueue, + .dequeue = completion_dequeue, + .get_count = completion_get_count, +}; diff --git a/libos/io/dpdk.c b/libos/io/dpdk.c new file mode 100644 index 0000000..b342957 --- /dev/null +++ b/libos/io/dpdk.c @@ -0,0 +1,224 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * dpdk.c - DPDK initialization for the iokernel dataplane + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define DEFAULT_RXQ_SIZE 128 /* default rx queue size */ +#define DEFAULT_TXQ_SIZE 128 /* default tx queue size */ + +static const struct rte_eth_conf port_conf_default = { + .rxmode = + { + .max_lro_pkt_size = ETH_MAX_LEN, + .offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, + .mq_mode = RTE_ETH_MQ_RX_RSS, + }, + .rx_adv_conf = + { + .rss_conf = + { + .rss_key = NULL, + .rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP, + }, + }, + .txmode = + { + .offloads = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM, + }, +}; + +/* + * Initializes a given port using global settings and with the RX buffers + * coming from the mbuf_pool passed as a parameter. + */ +static inline int dpdk_port_init(uint8_t port, struct rte_mempool *mbuf_pool) +{ + struct rte_eth_conf port_conf = port_conf_default; + const uint16_t rx_rings = 1, tx_rings = 1; + uint16_t nb_rxd = DEFAULT_RXQ_SIZE; + uint16_t nb_txd = DEFAULT_TXQ_SIZE; + int retval; + uint16_t q; + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf *txconf; + struct rte_eth_rxconf *rxconf; + + if (!rte_eth_dev_is_valid_port(port)) + return -1; + + /* Configure the Ethernet device. */ + retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf); + if (retval != 0) + return retval; + + retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd); + if (retval != 0) + return retval; + + rte_eth_dev_info_get(0, &dev_info); + rxconf = &dev_info.default_rxconf; + rxconf->rx_free_thresh = 64; + + /* Allocate and set up 1 RX queue per Ethernet port. */ + for (q = 0; q < rx_rings; q++) { + retval = + rte_eth_rx_queue_setup(port, q, nb_rxd, rte_eth_dev_socket_id(port), rxconf, mbuf_pool); + if (retval < 0) + return retval; + } + + /* Enable TX offloading */ + txconf = &dev_info.default_txconf; + txconf->tx_rs_thresh = 64; + txconf->tx_free_thresh = 64; + + /* Allocate and set up 1 TX queue per Ethernet port. */ + for (q = 0; q < tx_rings; q++) { + retval = rte_eth_tx_queue_setup(port, q, nb_txd, rte_eth_dev_socket_id(port), txconf); + if (retval < 0) + return retval; + } + + /* Start the Ethernet port. */ + retval = rte_eth_dev_start(port); + if (retval < 0) + return retval; + + /* Display the port MAC address. 
*/ + struct rte_ether_addr addr; + rte_eth_macaddr_get(port, &addr); + log_info("dpdk: port %u MAC %02X:%02X:%02X:%02X:%02X:%02X", (unsigned)port, addr.addr_bytes[0], + addr.addr_bytes[1], addr.addr_bytes[2], addr.addr_bytes[3], addr.addr_bytes[4], + addr.addr_bytes[5]); + + /* Enable RX in promiscuous mode for the Ethernet device. */ + rte_eth_promiscuous_enable(port); + + return 0; +} + +/* + * Log some ethernet port stats. + */ +void dpdk_print_eth_stats() +{ + int ret; + struct rte_eth_stats stats; + + ret = rte_eth_stats_get(io->port_id, &stats); + if (ret) + log_debug("dpdk: error getting eth stats"); + + fprintf(stderr, "eth stats for port %d at time %" PRIu64 "\n", io->port_id, now_us()); + fprintf(stderr, "RX-packets: %" PRIu64 " RX-dropped: %" PRIu64 " RX-bytes: %" PRIu64 "\n", + stats.ipackets, stats.imissed, stats.ibytes); + fprintf(stderr, "TX-packets: %" PRIu64 " TX-bytes: %" PRIu64 "\n", stats.opackets, + stats.obytes); + fprintf(stderr, "RX-error: %" PRIu64 " TX-error: %" PRIu64 " RX-mbuf-fail: %" PRIu64 "\n", + stats.ierrors, stats.oerrors, stats.rx_nombuf); +} + +/* + * Initialize dpdk, must be done as soon as possible. + */ +int dpdk_init() +{ + char *argv[4]; + char buf[10]; + + /* init args */ + argv[0] = "./skyloft_iothread"; + argv[1] = "-l"; + /* use our assigned core */ + sprintf(buf, "%d", hw_cpu_id(IO_CPU)); + argv[2] = buf; + argv[3] = "--socket-mem=128"; + + /* initialize the Environment Abstraction Layer (EAL) */ + int ret = rte_eal_init(sizeof(argv) / sizeof(argv[0]), argv); + if (ret < 0) { + log_err("dpdk: error with EAL initialization"); + return -EINVAL; + } + + /* check that there is a port to send/receive on */ + if (!rte_eth_dev_is_valid_port(0)) { + log_err("dpdk: no available ports"); + return -EBUSY; + } + + if (rte_lcore_count() > 1) + log_warn("dpdk: too many lcores enabled, only 1 used"); + + return 0; +} + +/* + * Additional dpdk initialization that must be done after rx init. + */ +int dpdk_late_init() +{ + /* initialize port */ + io->port_id = 0; + if (dpdk_port_init(io->port_id, io->rx_mbuf_pool) != 0) { + log_err("dpdk: cannot init port %" PRIu8 "\n", io->port_id); + return -1; + } + + if (rte_eth_dev_socket_id(io->port_id) != (int)rte_socket_id()) { + log_err("dpdk: port %u (socket %d) is on remote NUMA node to polling thread (socket %d). 
" + "Performance will not be optimal.", + io->port_id, rte_eth_dev_socket_id(io->port_id), rte_socket_id()); + } + + log_info("dpdk: I/O thread on CPU %u", rte_lcore_id()); + + return 0; +} diff --git a/libos/io/main.c b/libos/io/main.c new file mode 100644 index 0000000..80609fe --- /dev/null +++ b/libos/io/main.c @@ -0,0 +1,177 @@ +/* + * main.c: a thread running on the CPU dedicated for network I/O + */ + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define LOG_INTERVAL_US (3000 * 1000) + +struct iothread_t *io; +physaddr_t *page_paddrs; + +int str_to_ip(const char *str, uint32_t *addr) +{ + uint8_t a, b, c, d; + if (sscanf(str, "%hhu.%hhu.%hhu.%hhu", &a, &b, &c, &d) != 4) { + return -EINVAL; + } + + *addr = MAKE_IP_ADDR(a, b, c, d); + return 0; +} + +int str_to_mac(const char *str, struct eth_addr *addr) +{ + size_t i; + static const char *fmts[] = {"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", + "%hhx%hhx%hhx%hhx%hhx%hhx"}; + + for (i = 0; i < ARRAY_SIZE(fmts); i++) { + if (sscanf(str, fmts[i], &addr->addr[0], &addr->addr[1], &addr->addr[2], &addr->addr[3], + &addr->addr[4], &addr->addr[5]) == 6) { + return 0; + } + } + return -EINVAL; +} + +static size_t cal_chan_size(int nr) +{ + size_t ret = 0, q; + + /* RX queues */ + q = sizeof(struct lrpc_msg) * IO_PKTQ_SIZE; + q = align_up(q, CACHE_LINE_SIZE); + ret += q * nr; + + /* TX packet queues */ + q = sizeof(struct lrpc_msg) * IO_PKTQ_SIZE; + q = align_up(q, CACHE_LINE_SIZE); + ret += q * nr; + + /* TX command queues */ + q = sizeof(struct lrpc_msg) * IO_CMDQ_SIZE; + q = align_up(q, CACHE_LINE_SIZE); + ret += q * nr; + + /* TX egress pool */ + ret = align_up(ret, PGSIZE_2MB); + ret += EGRESS_POOL_SIZE(proc->nr_ks); + ret = align_up(ret, PGSIZE_2MB); + + return ret; +} + +int iothread_init(void) +{ + int i, ret = 0; + void *base; + size_t len, nr_pages; + struct kthread *k; + char *ptr; + + io = (struct iothread_t *)malloc(sizeof(struct iothread_t)); + + /* default configurations */ + str_to_ip(IO_ADDR, &io->addr); + str_to_mac(IO_MAC, &io->mac); + str_to_ip(IO_GATEWAY, &io->gateway); + str_to_ip(IO_NETMASK, &io->netmask); + + /* map communication shared memory for command queues and egress packets */ + len = cal_chan_size(proc->nr_ks); + base = mem_map_anom(NULL, len, PGSIZE_2MB, 0); + if (base == MAP_FAILED) { + log_err("ioqueues: mem_map_shm() failed"); + return -1; + } + + ptr = base; + for (i = 0; i < proc->nr_ks; i++) { + k = &proc->all_ks[i]; + mbufq_init(&k->txpktq_overflow); + mbufq_init(&k->txcmdq_overflow); + lrpc_init(&k->rxq, (struct lrpc_msg *)ptr, IO_PKTQ_SIZE); + ptr += align_up(sizeof(struct lrpc_msg) * IO_PKTQ_SIZE, CACHE_LINE_SIZE); + lrpc_init(&k->txpktq, (struct lrpc_msg *)ptr, IO_PKTQ_SIZE); + ptr += align_up(sizeof(struct lrpc_msg) * IO_PKTQ_SIZE, CACHE_LINE_SIZE); + lrpc_init(&k->txcmdq, (struct lrpc_msg *)ptr, IO_CMDQ_SIZE); + ptr += align_up(sizeof(struct lrpc_msg) * IO_CMDQ_SIZE, CACHE_LINE_SIZE); + } + + ptr = (char *)align_up((uintptr_t)ptr, PGSIZE_2MB); + io->tx_region.base = ptr; + io->tx_region.len = EGRESS_POOL_SIZE(proc->nr_ks); + + /* initialize the table of physical page addresses */ + nr_pages = div_up(io->tx_region.len, PGSIZE_2MB); + page_paddrs = malloc(nr_pages * sizeof(physaddr_t)); + ret = + mem_lookup_page_phys_addrs(io->tx_region.base, io->tx_region.len, PGSIZE_2MB, page_paddrs); + if (ret) + return ret; + + proc->max_overflows = EGRESS_POOL_SIZE(proc->nr_ks) / MBUF_DEFAULT_LEN; + 
proc->nr_overflows = 0; + proc->overflow_queue = malloc(sizeof(uint64_t) * proc->max_overflows); + + return ret; +} + +static const struct init_entry init_handlers[] = { + /* dataplane */ + INITIALIZER(dpdk, init), + INITIALIZER(rx, init), + INITIALIZER(tx, init), + INITIALIZER(dpdk_late, init), + + /* network stack */ + INITIALIZER(net, init), + INITIALIZER(arp, init), + INITIALIZER(trans, init), + INITIALIZER(arp, init_late), +}; + +__noreturn void iothread_main() +{ + assert(current_cpu_id() == IO_CPU); + + BUG_ON(run_init_handlers(init_handlers, sizeof(init_handlers) / sizeof(struct init_entry))); + atomic_store_rel(&proc->ready, true); + log_info("io: initialized on CPU %d(%d)", current_cpu_id(), hw_cpu_id(current_cpu_id())); + +#ifdef SKYLOFT_STAT + uint64_t next_log_time = now_us(); +#endif + + for (;;) { + rx_burst(); + + cmd_rx_burst(); + + tx_drain_completions(proc, IO_OVERFLOW_BATCH_DRAIN); + + tx_burst(); + +#ifdef SKYLOFT_STAT + if (now_us() > next_log_time) { + print_stats(); + dpdk_print_eth_stats(); + next_log_time += LOG_INTERVAL_US; + } +#endif + } +} diff --git a/libos/io/rx.c b/libos/io/rx.c new file mode 100644 index 0000000..4e00735 --- /dev/null +++ b/libos/io/rx.c @@ -0,0 +1,158 @@ +/* + * rx.c - the receive path for the I/O kernel (network -> runtimes) + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define MBUF_CACHE_SIZE 250 +#define RX_PREFETCH_STRIDE 2 + +/* + * Prepend rx_net_hdr preamble to ingress packets. + */ +static struct rx_net_hdr *rx_prepend_rx_preamble(struct rte_mbuf *buf) +{ + struct rx_net_hdr *net_hdr; + uint64_t masked_ol_flags; + + net_hdr = (struct rx_net_hdr *)rte_pktmbuf_prepend(buf, (uint16_t)sizeof(*net_hdr)); + RTE_ASSERT(net_hdr != NULL); + + net_hdr->completion_data = (unsigned long)buf; + net_hdr->len = rte_pktmbuf_pkt_len(buf) - sizeof(*net_hdr); + net_hdr->rss_hash = buf->hash.rss; + masked_ol_flags = buf->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK; + if (masked_ol_flags == RTE_MBUF_F_RX_IP_CKSUM_GOOD) + net_hdr->csum_type = CHECKSUM_TYPE_UNNECESSARY; + else + net_hdr->csum_type = CHECKSUM_TYPE_NEEDED; + net_hdr->csum = 0; /* unused for now */ + + return net_hdr; +} + +/** + * rx_send_to_runtime - enqueues a command to an RXQ for a runtime + * @p: the runtime's proc structure + * @hash: the 5-tuple hash for the flow the command is related to + * @cmd: the command to send + * @payload: the command payload to send + * + * Returns true if the command was enqueued, otherwise a thread is not running + * and can't be woken or the queue was full. 
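+ *
+ * For example, steering by the RSS hash keeps every packet of a flow on the
+ * same kthread:
+ *
+ *   rx_send_to_runtime(p, hdr->rss_hash, RX_NET_RECV, (uint64_t)hdr);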
+ */ +bool rx_send_to_runtime(struct proc *p, uint32_t hash, uint64_t cmd, uint64_t payload) +{ + struct kthread *t; + + if (p->nr_ks > 0) { + /* load balance between active threads */ + t = &p->all_ks[hash % p->nr_ks]; + } else { + BUG(); + } + + return lrpc_send(&t->rxq, cmd, payload); +} + +static bool rx_send_pkt_to_runtime(struct proc *p, struct rx_net_hdr *hdr) +{ + return rx_send_to_runtime(p, hdr->rss_hash, RX_NET_RECV, (uint64_t)hdr); +} + +static void rx_one_pkt(struct rte_mbuf *buf) +{ + struct rte_ether_hdr *ptr_mac_hdr; + struct rte_ether_addr *ptr_dst_addr; + struct rx_net_hdr *net_hdr; + int n = 0; + + ptr_mac_hdr = rte_pktmbuf_mtod(buf, struct rte_ether_hdr *); + ptr_dst_addr = &ptr_mac_hdr->dst_addr; + log_debug("rx: rx packet with MAC %02" PRIx8 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 + " %02" PRIx8, + ptr_dst_addr->addr_bytes[0], ptr_dst_addr->addr_bytes[1], ptr_dst_addr->addr_bytes[2], + ptr_dst_addr->addr_bytes[3], ptr_dst_addr->addr_bytes[4], + ptr_dst_addr->addr_bytes[5]); + + /* handle unicast destinations (send to a single runtime) */ + if (likely(rte_is_unicast_ether_addr(ptr_dst_addr))) { + net_hdr = rx_prepend_rx_preamble(buf); + + if (!rx_send_pkt_to_runtime(proc, net_hdr)) { + log_debug("rx: failed to send unicast packet to runtime"); + rte_pktmbuf_free(buf); + } + return; + } + + /* handle broadcast destinations (send to all runtimes) */ + if (rte_is_broadcast_ether_addr(ptr_dst_addr)) { + net_hdr = rx_prepend_rx_preamble(buf); + + if (rx_send_pkt_to_runtime(proc, net_hdr)) { + n++; + } else { + log_debug("rx: failed to enqueue broadcast packet to runtime"); + } + + if (!n) { + rte_pktmbuf_free(buf); + return; + } + rte_mbuf_refcnt_update(buf, n - 1); + return; + } + + rte_pktmbuf_free(buf); +} + +/* + * Process a batch of incoming packets. + */ +bool rx_burst(void) +{ + struct rte_mbuf *bufs[IO_RX_BURST_SIZE]; + uint16_t nb_rx, i; + + /* retrieve packets from NIC queue */ + nb_rx = rte_eth_rx_burst(io->port_id, 0, bufs, IO_RX_BURST_SIZE); + if (nb_rx > 0) + log_debug("rx: received %d packet(s) on port %d", nb_rx, io->port_id); + + for (i = 0; i < nb_rx; i++) { + if (i + RX_PREFETCH_STRIDE < nb_rx) { + prefetch(rte_pktmbuf_mtod(bufs[i + RX_PREFETCH_STRIDE], char *)); + } + rx_one_pkt(bufs[i]); + } + + return nb_rx > 0; +} + +/* + * Initialize rx state. + */ +int rx_init() +{ + /* create a mempool in shared memory to hold the rx mbufs */ + io->rx_mbuf_pool = rte_pktmbuf_pool_create("RX_MBUF_POOL", IO_NUM_MBUFS, MBUF_CACHE_SIZE, 0, + RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); + if (io->rx_mbuf_pool == NULL) { + return -ENOMEM; + } + + return 0; +} diff --git a/libos/io/tx.c b/libos/io/tx.c new file mode 100644 index 0000000..94ab60c --- /dev/null +++ b/libos/io/tx.c @@ -0,0 +1,304 @@ +/* + * tx.c - the transmission path for the I/O kernel (runtimes -> network) + */ + +#include +#include +#include + +#include +#include +#include + +#define TX_PREFETCH_STRIDE 2 + +/* + * Private data stored in egress mbufs, used to send completions to runtimes. + */ +struct tx_pktmbuf_priv { + struct proc *p; + struct kthread *t; + unsigned long completion_data; +}; + +static inline struct tx_pktmbuf_priv *tx_pktmbuf_get_priv(struct rte_mbuf *buf) +{ + return (struct tx_pktmbuf_priv *)(((char *)buf) + sizeof(struct rte_mbuf)); +} + +/* + * Prepare rte_mbuf struct for transmission. 
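+ *
+ * The payload already lives in the shared egress region, so the mbuf is
+ * pointed at it in place (no copy) and its DMA address is reconstructed from
+ * the page_paddrs table of the 2MB pages backing that region, rather than
+ * taken from a DPDK-managed data buffer.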
+ */ +static void tx_prepare_tx_mbuf(struct rte_mbuf *buf, const struct tx_net_hdr *net_hdr, + struct kthread *t) +{ + struct tx_pktmbuf_priv *priv_data; + uint32_t page_number; + + /* initialize mbuf to point to net_hdr->payload */ + buf->buf_addr = (char *)net_hdr->payload; + // rte_mbuf_iova_set(buf, (rte_iova_t)buf->buf_addr); + page_number = PGN_2MB((uintptr_t)buf->buf_addr - (uintptr_t)io->tx_region.base); + rte_mbuf_iova_set(buf, page_paddrs[page_number] + PGOFF_2MB(buf->buf_addr)); + buf->data_off = 0; + rte_mbuf_refcnt_set(buf, 1); + + buf->buf_len = net_hdr->len; + buf->pkt_len = net_hdr->len; + buf->data_len = net_hdr->len; + + buf->ol_flags = 0; + if (net_hdr->olflags != 0) { + if (net_hdr->olflags & OLFLAG_IP_CHKSUM) + buf->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM; + if (net_hdr->olflags & OLFLAG_TCP_CHKSUM) + buf->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM; + if (net_hdr->olflags & OLFLAG_IPV4) + buf->ol_flags |= RTE_MBUF_F_TX_IPV4; + if (net_hdr->olflags & OLFLAG_IPV6) + buf->ol_flags |= RTE_MBUF_F_TX_IPV6; + + buf->l4_len = sizeof(struct rte_tcp_hdr); + buf->l3_len = sizeof(struct rte_ipv4_hdr); + buf->l2_len = RTE_ETHER_HDR_LEN; + } + + /* initialize the private data, used to send completion events */ + priv_data = tx_pktmbuf_get_priv(buf); + priv_data->p = proc; + priv_data->t = t; + priv_data->completion_data = net_hdr->completion_data; +} + +/* + * Send a completion event to the runtime for the mbuf pointed to by obj. + */ +bool tx_send_completion(void *obj) +{ + struct rte_mbuf *buf; + struct tx_pktmbuf_priv *priv_data; + struct kthread *t; + struct proc *p; + + buf = (struct rte_mbuf *)obj; + priv_data = tx_pktmbuf_get_priv(buf); + p = priv_data->p; + + /* during initialization, the mbufs are enqueued for the first time */ + if (unlikely(!p)) + return true; + + /* check if runtime is still registered */ + if (unlikely(p->exited)) + return true; /* no need to send a completion */ + + /* send completion to runtime */ + t = priv_data->t; + if (!t->parked) { + if (likely(lrpc_send(&t->rxq, RX_NET_COMPLETE, priv_data->completion_data))) + return true; + } else { + if (likely( + rx_send_to_runtime(p, p->next_rr++, RX_NET_COMPLETE, priv_data->completion_data))) + return true; + } + + if (unlikely(p->nr_overflows == p->max_overflows)) { + log_warn("tx: Completion overflow queue is full"); + return false; + } + p->overflow_queue[p->nr_overflows++] = priv_data->completion_data; + log_debug("tx: failed to send completion to runtime"); + + return true; +} + +int tx_drain_completions(struct proc *p, int n) +{ + int i = 0; + while (p->nr_overflows > 0 && i < n) { + if (!rx_send_to_runtime(p, p->next_rr++, RX_NET_COMPLETE, + p->overflow_queue[--p->nr_overflows])) { + p->nr_overflows++; + break; + } + i++; + } + return i; +} + +static int tx_drain_queue(struct kthread *t, int n, const struct tx_net_hdr **hdrs) +{ + int i; + + for (i = 0; i < n; i++) { + uint64_t cmd; + unsigned long payload; + + if (!lrpc_recv(&t->txpktq, &cmd, &payload)) + break; + + /* TODO: need to kill the process? */ + BUG_ON(cmd != TXPKT_NET_XMIT); + + // hdrs[i] = shmptr_to_ptr(&io->tx_region, payload, sizeof(struct tx_net_hdr)); + hdrs[i] = (struct tx_net_hdr *)payload; + /* TODO: need to kill the process? */ + BUG_ON(!hdrs[i]); + } + + return i; +} + +/* + * Process a batch of outgoing packets. 
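+ *
+ * n_pkts and n_bufs are static on purpose: when the NIC TX ring fills up,
+ * the leftover mbufs are compacted to the front of bufs[] and retried on the
+ * next call, which applies back pressure to the runtimes.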
+ */ +bool tx_burst(void) +{ + const struct tx_net_hdr *hdrs[IO_TX_BURST_SIZE]; + static struct rte_mbuf *bufs[IO_TX_BURST_SIZE]; + struct kthread *threads[IO_TX_BURST_SIZE]; + unsigned int i, j, ret; + static unsigned int pos = 0, n_pkts = 0, n_bufs = 0; + struct kthread *t; + + /* + * Poll each kthread in each runtime until all have been polled or we + * have PKT_BURST_SIZE pkts. + */ + for (i = 0; i < proc->nr_ks; i++) { + t = &proc->all_ks[(pos + i) % proc->nr_ks]; + // if (t->parked) + // continue; + if (n_pkts >= IO_TX_BURST_SIZE) + goto full; + ret = tx_drain_queue(t, IO_TX_BURST_SIZE - n_pkts, &hdrs[n_pkts]); + for (j = n_pkts; j < n_pkts + ret; j++) threads[j] = t; + n_pkts += ret; + } + + if (n_pkts == 0) + return false; + + pos++; + +full: + + /* allocate mbufs */ + if (n_pkts - n_bufs > 0) { + ret = rte_mempool_get_bulk(io->tx_mbuf_pool, (void **)&bufs[n_bufs], n_pkts - n_bufs); + if (unlikely(ret)) { + log_warn("tx: error getting %d mbufs from mempool", n_pkts - n_bufs); + return true; + } + } + + /* fill in packet metadata */ + for (i = n_bufs; i < n_pkts; i++) { + if (i + TX_PREFETCH_STRIDE < n_pkts) + prefetch(hdrs[i + TX_PREFETCH_STRIDE]); + tx_prepare_tx_mbuf(bufs[i], hdrs[i], threads[i]); + } + + n_bufs = n_pkts; + + /* finally, send the packets on the wire */ + ret = rte_eth_tx_burst(io->port_id, 0, bufs, n_pkts); + log_debug("tx: transmitted %d packet(s) on port %d", ret, io->port_id); + + /* apply back pressure if the NIC TX ring was full */ + if (unlikely(ret < n_pkts)) { + n_pkts -= ret; + for (i = 0; i < n_pkts; i++) bufs[i] = bufs[ret + i]; + } else { + n_pkts = 0; + } + + n_bufs = n_pkts; + return true; +} + +/* + * Zero out private data for a packet + */ + +static void tx_pktmbuf_priv_init(struct rte_mempool *mp, void *opaque, void *obj, unsigned obj_idx) +{ + struct rte_mbuf *buf = obj; + struct tx_pktmbuf_priv *data = tx_pktmbuf_get_priv(buf); + memset(data, 0, sizeof(*data)); +} + +/* + * Create and initialize a packet mbuf pool for holding struct mbufs and + * handling completion events. Actual buffer memory is separate, in shared + * memory. + */ +static struct rte_mempool *tx_pktmbuf_completion_pool_create(const char *name, unsigned n, + uint16_t priv_size, int socket_id) +{ + struct rte_mempool *mp; + struct rte_pktmbuf_pool_private mbp_priv; + unsigned elt_size; + int ret; + + if (RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) != priv_size) { + log_err("tx: mbuf priv_size=%u is not aligned", priv_size); + rte_errno = EINVAL; + return NULL; + } + elt_size = sizeof(struct rte_mbuf) + (unsigned)priv_size; + mbp_priv.mbuf_data_room_size = 0; + mbp_priv.mbuf_priv_size = priv_size; + + mp = rte_mempool_create_empty(name, n, elt_size, 0, sizeof(struct rte_pktmbuf_pool_private), + socket_id, 0); + if (mp == NULL) + return NULL; + + ret = rte_mempool_set_ops_byname(mp, "completion", NULL); + if (ret != 0) { + log_err("tx: error setting mempool handler"); + rte_mempool_free(mp); + rte_errno = -ret; + return NULL; + } + rte_pktmbuf_pool_init(mp, &mbp_priv); + + ret = rte_mempool_populate_default(mp); + if (ret < 0) { + rte_mempool_free(mp); + rte_errno = -ret; + return NULL; + } + + rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL); + rte_mempool_obj_iter(mp, tx_pktmbuf_priv_init, NULL); + + return mp; +} + +/* + * Initialize tx state. 
+ */ +int tx_init() +{ + int ret; + + /* register completion ops */ + extern struct rte_mempool_ops ops_completion; + if ((ret = rte_mempool_register_ops(&ops_completion)) < 0) { + log_err("tx: couldn't register completion ops"); + return ret; + } + + /* create a mempool to hold struct rte_mbufs and handle completions */ + io->tx_mbuf_pool = tx_pktmbuf_completion_pool_create( + "TX_MBUF_POOL", IO_NUM_COMPLETIONS, sizeof(struct tx_pktmbuf_priv), rte_socket_id()); + if (io->tx_mbuf_pool == NULL) { + log_err("tx: couldn't create tx mbuf pool"); + return -1; + } + + return 0; +} diff --git a/libos/libos.c b/libos/libos.c new file mode 100644 index 0000000..169677b --- /dev/null +++ b/libos/libos.c @@ -0,0 +1,270 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +__usec g_boot_time_us; +int g_app_id; +__thread int g_logic_cpu_id; +__thread bool thread_init_done; + +struct metadata *shm_metadata; +struct proc *shm_apps; +struct proc *proc; + +static thread_fn_t saved_app_main; +static void *saved_app_arg; +static atomic_int all_init_done; + +static initializer_fn_t global_init_hook; +static initializer_fn_t percpu_init_hook; +static initializer_fn_t late_init_hook; + +static const struct init_entry init_handlers[] = { + INITIALIZER(global, init), + INITIALIZER(signal, init), + INITIALIZER(platform, init), + + /* memory management */ + INITIALIZER(page, init), + INITIALIZER(slab, init), + INITIALIZER(smalloc, init), + + /* scheduler */ + INITIALIZER(sched, init), + INITIALIZER(proc, init), +#ifdef SKYLOFT_DPDK + INITIALIZER(iothread, init), +#endif +}; + +static const struct init_entry init_handlers_percpu[] = { + /* memory memangement */ + INITIALIZER(percpu, init), + INITIALIZER(page, init_percpu), + INITIALIZER(smalloc, init_percpu), + + /* scheduler */ + INITIALIZER(sched, init_percpu), + INITIALIZER(timer, init_percpu), +}; + +static const struct init_entry late_init_handlers_percpu[] = { + /* platform */ + INITIALIZER(cpubind, init_percpu), + INITIALIZER(platform, init_percpu), +}; + +int global_init() +{ + int ret = 0; + + spin_lock(&shm_metadata->lock); + + if (!shm_metadata->nr_apps) + shm_metadata->boot_time_us = now_us(); + g_boot_time_us = shm_metadata->boot_time_us; + + if (shm_metadata->nr_apps >= MAX_APPS) { + log_err("Too many apps %d", shm_metadata->nr_apps); + ret = -EOVERFLOW; + goto out; + } + + g_app_id = shm_metadata->nr_apps++; + proc = &shm_apps[g_app_id]; + proc->id = g_app_id; + log_info("global_init: APP ID %d", g_app_id); +out: + spin_unlock(&shm_metadata->lock); + return ret; +} + +int proc_init() +{ + int i, ret = 0; + + proc->pid = getpid(); + proc->exited = false; + proc->ready = false; +#ifdef SKYLOFT_DPDK + proc->nr_ks = USED_CPUS - 1; +#else + proc->nr_ks = USED_CPUS; +#endif + for (i = 0; i < USED_CPUS; i++) { + struct kthread *k = &proc->all_ks[i]; + + spin_lock_init(&k->lock); + k->cpu = i; + k->node = cpu_numa_node(i); + k->app = proc->id; + k->parked = false; + memset(k->stats, 0, sizeof(k->stats)); + } + + return ret; +} + +int cpubind_init_percpu(void) +{ + if (is_daemon()) { + bind_to_cpu(0, g_logic_cpu_id); + } else { + BUG_ON(skyloft_park_on_cpu(g_logic_cpu_id)); + } + + log_info("CPU %d(%d): node = %d, tid = %d %p", g_logic_cpu_id, hw_cpu_id(g_logic_cpu_id), + thisk()->node, _gettid(), localk); + + return 0; +} + +static __noreturn void *percpu_entry(void *arg) +{ + 
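+    /*
+     * Per-CPU bring-up: record this CPU's identity, run the per-CPU init
+     * handlers, spawn the application's main task on CPU 0, synchronize with
+     * the other CPUs, and finally either enter the I/O loop (the DPDK I/O
+     * CPU) or start scheduling tasks.
+     */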
int ret, cpu_id = (int)(size_t)arg; + extern __thread struct kthread *localk; + struct kthread *k; + + g_logic_cpu_id = cpu_id; + + localk = &proc->all_ks[cpu_id]; + k = thisk(); + assert(k == localk); + k->tid = _gettid(); + + ret = run_init_handlers(init_handlers_percpu, + sizeof(init_handlers_percpu) / sizeof(struct init_entry)); + if (ret < 0) { + log_err("Failed to init on CPU %d: %d", cpu_id, ret); + exit(EXIT_FAILURE); + } + + /* TODO: run it on IO_CPU if DPDK disabled */ + if (percpu_init_hook && cpu_id != IO_CPU) { + ret = percpu_init_hook(); + if (ret) { + log_err("percpu_init_hook(): failed with %d", ret); + exit(EXIT_FAILURE); + } + } + + if (saved_app_main && !cpu_id) { + if (task_spawn(0, saved_app_main, saved_app_arg, RUNTIME_LARGE_STACK_SIZE) < 0) { + panic("Cannot spawn main thread"); + } + } + +#ifndef SCHED_PERCPU + /* synchronize */ + thread_init_done = true; + atomic_fetch_add(&all_init_done, 1); + while (atomic_load(&all_init_done) < USED_CPUS); +#endif + + ret = run_init_handlers(late_init_handlers_percpu, + sizeof(late_init_handlers_percpu) / sizeof(struct init_entry)); + if (ret < 0) { + log_err("Failed to init on CPU %d: %d", cpu_id, ret); + exit(EXIT_FAILURE); + } + + if (!cpu_id && late_init_hook) { + ret = late_init_hook(); + if (ret) { + log_err("late_init_hook(): failed with %d", ret); + exit(EXIT_FAILURE); + } + } + +#ifdef SCHED_PERCPU + /* synchronize */ + thread_init_done = true; + atomic_fetch_add(&all_init_done, 1); + while (atomic_load(&all_init_done) < USED_CPUS); +#endif + +#ifdef SKYLOFT_DPDK + /* CPU reserved for I/O */ + if (cpu_id == IO_CPU) { + iothread_main(); + } else { + while (!atomic_load_acq(&proc->ready)); + + ret = net_init_percpu(); + if (ret < 0) { + log_err("net_init_percpu(): failed with %d", ret); + } + + start_schedule(); + } +#else + start_schedule(); +#endif +} + +int __api sl_set_initializers(initializer_fn_t global, initializer_fn_t percpu, + initializer_fn_t late) +{ + global_init_hook = global; + percpu_init_hook = percpu; + late_init_hook = late; + return 0; +} + +int __api sl_libos_start(thread_fn_t entry, void *arg) +{ + int ret = 0; + long int i; + + saved_app_main = entry; + saved_app_arg = arg; + + shm_metadata = mem_map_shm_file(SHM_META_PATH, NULL, sizeof(struct metadata), PGSIZE_4KB, 0); + if (shm_metadata == MAP_FAILED) + return -ENOMEM; + + shm_apps = mem_map_shm_file(SHM_APPS_PATH, NULL, sizeof(struct proc) * MAX_APPS, PGSIZE_4KB, 0); + if (shm_apps == MAP_FAILED) + return -ENOMEM; + + ret = run_init_handlers(init_handlers, sizeof(init_handlers) / sizeof(struct init_entry)); + if (ret < 0) { + log_err("Failed to init: %d", ret); + return -1; + } + + if (global_init_hook) { + ret = global_init_hook(); + if (ret) { + log_err("global_init_hook(): failed with %d", ret); + return ret; + } + } + + for (i = 1; i < USED_CPUS; i++) { + pthread_create(&proc->all_ks[i].ph, NULL, percpu_entry, (void *)i); + } + + percpu_entry((void *)0); +} diff --git a/libos/libos.ld b/libos/libos.ld new file mode 100644 index 0000000..4efccd7 --- /dev/null +++ b/libos/libos.ld @@ -0,0 +1,11 @@ +SECTIONS +{ + __percpu_load = .; + .percpu 0 : AT(__percpu_load) { + PROVIDE(__percpu_start = .); + *(.percpu) + PROVIDE(__percpu_end = .); + } + . 
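+/*
+ * The .percpu template is linked at virtual address 0 so that every symbol
+ * inside it resolves to an offset rather than an absolute address; the
+ * initial image itself is loaded at __percpu_load, and the runtime is
+ * presumably expected to copy it into each CPU's private area and add that
+ * CPU's base when accessing the symbols. The location counter is then
+ * restored to __percpu_load + SIZEOF(.percpu) so later sections are not
+ * laid out at address 0.
+ */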
= __percpu_load + SIZEOF(.percpu); +} +INSERT AFTER .text diff --git a/libos/mm/mempool.c b/libos/mm/mempool.c new file mode 100644 index 0000000..8171ce5 --- /dev/null +++ b/libos/mm/mempool.c @@ -0,0 +1,160 @@ +/* + * mempool.c - a simple, preallocated, virtually contiguous pool of memory + * + * For convenience with DMA operations, items are not allowed to straddle page + * boundaries. + */ + +#include +#include +#include + +#include +#include +#include + +#ifdef DEBUG + +static void mempool_common_check(struct mempool *m, void *item) +{ + uintptr_t pos = (uintptr_t)item; + uintptr_t start = (uintptr_t)m->buf; + + /* is the item within the bounds of the pool */ + assert(pos >= start && pos < start + m->len); + + /* is the item properly aligned */ + assert((start & (m->pg_sz - 1)) % m->item_len == 0); +} + +void __mempool_alloc_debug_check(struct mempool *m, void *item) +{ + mempool_common_check(m, item); + + /* poison the item */ + memset(item, 0xAB, m->item_len); +} + +void __mempool_free_debug_check(struct mempool *m, void *item) +{ + mempool_common_check(m, item); + + /* poison the item */ + memset(item, 0xCD, m->item_len); +} + +#endif /* DEBUG */ + +static int mempool_populate(struct mempool *m, void *buf, size_t len, size_t pg_sz, size_t item_len) +{ + size_t items_per_page = pg_sz / item_len; + size_t nr_pages = len / pg_sz; + size_t i, j; + + m->free_items = calloc(nr_pages * items_per_page, sizeof(void *)); + if (!m->free_items) + return -ENOMEM; + + for (i = 0; i < nr_pages; i++) { + for (j = 0; j < items_per_page; j++) { + m->free_items[m->capacity++] = (char *)buf + pg_sz * i + item_len * j; + } + } + + return 0; +} + +/** + * mempool_create - initializes a memory pool + * @m: the memory pool to initialize + * @buf: the start of the buffer region managed by the pool + * @len: the length of the buffer region managed by the pool + * @pg_sz: the size of the pages in the buffer region (must be uniform) + * @item_len: the length of each item in the pool + */ +int mempool_create(struct mempool *m, void *buf, size_t len, size_t pg_sz, size_t item_len) +{ + if (item_len == 0 || !is_power_of_two(pg_sz) || len % pg_sz != 0) + return -EINVAL; + + m->allocated = 0; + m->buf = buf; + m->len = len; + m->pg_sz = pg_sz; + m->item_len = item_len; + + return mempool_populate(m, buf, len, pg_sz, item_len); +} + +/** + * mempool_destroy - tears down a memory pool + * @m: the memory pool to tear down + */ +void mempool_destroy(struct mempool *m) +{ + free(m->free_items); +} + +struct mempool_tc { + struct mempool *m; + spinlock_t lock; +}; + +static void mempool_tcache_free(struct tcache *tc, int nr, void **items) +{ + int i; + + struct mempool_tc *mptc = (struct mempool_tc *)tc->data; + + spin_lock(&mptc->lock); + for (i = 0; i < nr; i++) { + mempool_free(mptc->m, items[i]); + } + spin_unlock(&mptc->lock); +} + +static int mempool_tcache_alloc(struct tcache *tc, int nr, void **items) +{ + int i; + + struct mempool_tc *mptc = (struct mempool_tc *)tc->data; + + spin_lock(&mptc->lock); + for (i = 0; i < nr; i++) { + items[i] = mempool_alloc(mptc->m); + if (items[i] == NULL) { + spin_unlock(&mptc->lock); + mempool_tcache_free(tc, i, items); + return -ENOMEM; + } + } + spin_unlock(&mptc->lock); + return 0; +} + +static const struct tcache_ops mempool_tcache_ops = { + .alloc = mempool_tcache_alloc, + .free = mempool_tcache_free, +}; + +struct tcache *mempool_create_tcache(struct mempool *m, const char *name, unsigned int mag_size) +{ + struct mempool_tc *mptc; + struct tcache *tc; + + mptc = 
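+    /*
+     * Illustrative pairing of the two APIs in this file (a sketch, not part
+     * of this patch; buf, item_len and the magazine size of 64 are
+     * assumptions supplied by the caller):
+     *
+     *   struct mempool m;
+     *   mempool_create(&m, buf, 16 * PGSIZE_2MB, PGSIZE_2MB, item_len);
+     *   struct tcache *tc = mempool_create_tcache(&m, "egress_bufs", 64);
+     */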
malloc(sizeof(*mptc)); + if (mptc == NULL) + return NULL; + + mptc->m = m; + spin_lock_init(&mptc->lock); + + tc = tcache_create(name, &mempool_tcache_ops, mag_size, m->item_len); + if (!tc) { + free(mptc); + return NULL; + } + + tc->data = (unsigned long)mptc; + return tc; +} diff --git a/libos/mm/page.c b/libos/mm/page.c new file mode 100644 index 0000000..06fe1cf --- /dev/null +++ b/libos/mm/page.c @@ -0,0 +1,396 @@ +/* + * page.c - the page allocator + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * This pointer contains an array of page structs, organized as follows: + * [NUMA 0 pages] [NUMA 1 pages] ... [NUMA N pages] + */ +struct page *page_tbl; + +/* large page (2MB) definitions */ +struct lgpage_node { + spinlock_t lock; + unsigned int idx; + struct page *tbl; /* aliases page_tbl above */ + struct list_head pages; + uint64_t pad[4]; +} __aligned(CACHE_LINE_SIZE); +static struct lgpage_node lgpage_nodes[MAX_NUMA]; + +/* small page (4KB) definitions */ +extern struct slab smpage_slab; /* defined in mm/slab.c */ +extern struct tcache *smpage_tcache; +static __thread struct tcache_percpu smpage_pt; + +#ifdef DEBUG + +static void page_check(struct page *pg, size_t pgsize) +{ + /* since the page is allocated, it must be marked in use */ + assert(pg->flags & PAGE_FLAG_IN_USE); + + /* check for unsupported page sizes */ + assert(pgsize == PGSIZE_4KB || pgsize == PGSIZE_2MB); + + /* finally verify the page is configured correctly for its size */ + assert(page_to_size(pg) == pgsize); + if (pgsize == PGSIZE_4KB) { + assert(!(pg->flags & PAGE_FLAG_SHATTERED)); + pg = smpage_to_lgpage(pg); + assert(pg->flags & PAGE_FLAG_LARGE); + assert(pg->flags & PAGE_FLAG_SHATTERED); + } + + /* check that the lgpage is inside the table */ + assert(pg - page_tbl >= 0 && pg - page_tbl < (long)(LGPAGE_META_ENTS * MAX_NUMA)); +} + +static void page_alloc_check(struct page *pg, size_t pgsize) +{ + page_check(pg, pgsize); + assert(!kref_released(&pg->ref)); + + /* poison the page */ + memset(page_to_addr(pg), 0xEF, pgsize); +} + +static void page_free_check(struct page *pg, size_t pgsize) +{ + page_check(pg, pgsize); + assert(kref_released(&pg->ref)); + + /* poison the page */ + memset(page_to_addr(pg), 0x89, pgsize); +} + +#else /* DEBUG */ + +static void page_alloc_check(struct page *pg, size_t pgsize) +{ + ; +} +static void page_free_check(struct page *pg, size_t pgsize) +{ + ; +} + +#endif /* DEBUG */ + +static int lgpage_create(struct page *pg, int numa_node) +{ + void *pgaddr = lgpage_to_addr(pg); + int ret; + + pgaddr = mem_map_anom(pgaddr, PGSIZE_2MB, PGSIZE_2MB, numa_node); + if (pgaddr == MAP_FAILED) { + log_err("page: out of 2mb pages\n"); + return -ENOMEM; + } + + ret = mem_lookup_page_phys_addr(pgaddr, PGSIZE_2MB, &pg->paddr); + if (ret) { + munmap(pgaddr, PGSIZE_2MB); + return ret; + } + + kref_init(&pg->ref); + pg->flags = PAGE_FLAG_LARGE | PAGE_FLAG_IN_USE; + return 0; +} + +static void lgpage_destroy(struct page *pg) +{ + munmap(lgpage_to_addr(pg), PGSIZE_2MB); + pg->flags = 0; + pg->paddr = 0; +} + +static struct page *lgpage_alloc_on_node(int numa_node) +{ + struct lgpage_node *node; + struct page *pg; + int ret; + + assert(numa_node < MAX_NUMA); + node = &lgpage_nodes[numa_node]; + + spin_lock(&node->lock); + pg = list_pop(&node->pages, struct page, link); + if (!pg) { + if (unlikely(node->idx >= LGPAGE_META_ENTS)) { + spin_unlock(&node->lock); + log_err_once("out of page region addresses"); + return 
NULL; + } + + pg = &node->tbl[node->idx++]; + } + spin_unlock(&node->lock); + + assert(!(pg->flags & PAGE_FLAG_IN_USE)); + ret = lgpage_create(pg, numa_node); + if (ret) { + log_err_once("page: unable to create 2MB page," + "node = %d, ret = %d", + numa_node, ret); + return NULL; + } + + return pg; +} + +static void lgpage_free(struct page *pg) +{ + unsigned int numa_node = addr_to_numa_node(lgpage_to_addr(pg)); + struct lgpage_node *node = &lgpage_nodes[numa_node]; + + assert(numa_node < MAX_NUMA); + lgpage_destroy(pg); + spin_lock(&node->lock); + list_add(&node->pages, &pg->link); + spin_unlock(&node->lock); +} + +static struct page *smpage_alloc_on_node(int numa_node) +{ + struct page *pg; + void *addr; + + if (thread_init_done && current_numa_node() == numa_node) { + /* if on the local node use the fast path */ + addr = tcache_alloc(&smpage_pt); + } else { + /* otherwise perform a remote slab allocation */ + addr = slab_alloc_on_node(&smpage_slab, numa_node); + } + + if (!addr) + return NULL; + + pg = addr_to_smpage(addr); + kref_init(&pg->ref); + pg->flags = PAGE_FLAG_IN_USE; + pg->paddr = addr_to_pa(addr); + return pg; +} + +static void smpage_free(struct page *pg) +{ + void *addr = smpage_to_addr(pg); + int numa_node = addr_to_numa_node(addr); + + assert(numa_node < MAX_NUMA); + pg->flags = 0; + + if (thread_init_done && current_numa_node() == numa_node) { + /* if on the local node use the fast path */ + tcache_free(&smpage_pt, addr); + } else { + /* otherwise perform a remote slab free */ + slab_free(&smpage_slab, addr); + } +} + +/** + * page_alloc_on_node - allocates a page for a NUMA node + * @pgsize: the size of the page + * @numa_node: the NUMA node the page is allocated from + * + * Returns a page, or NULL if an error occurred. + */ +struct page *page_alloc_on_node(size_t pgsize, int numa_node) +{ + struct page *pg; + + switch (pgsize) { + case PGSIZE_4KB: + pg = smpage_alloc_on_node(numa_node); + break; + case PGSIZE_2MB: + pg = lgpage_alloc_on_node(numa_node); + break; + default: + /* unsupported page size */ + pg = NULL; + } + + page_alloc_check(pg, pgsize); + return pg; +} + +/** + * page_alloc - allocates a page + * @pgsize: the size of the page + * + * Returns a page, or NULL if out of memory. + */ +struct page *page_alloc(size_t pgsize) +{ + return page_alloc_on_node(pgsize, current_numa_node()); +} + +/** + * page_zalloc - allocates a zeroed page + * @pgsize: the size of the page + * + * Returns a page, or NULL if out of memory. + */ +struct page *page_zalloc(size_t pgsize) +{ + void *addr; + struct page *pg = page_alloc(pgsize); + if (!pg) + return NULL; + + addr = page_to_addr(pg); + memset(addr, 0, pgsize); + return pg; +} + +/** + * page_alloc_addr_on_node - allocates a page address on for a NUMA node + * @pgsize: the size of the page + * @numa_node: the NUMA node the page is allocated from + * + * Returns a pointer to page data, or NULL if an error occurred. + */ +void *page_alloc_addr_on_node(size_t pgsize, int numa_node) +{ + struct page *pg = page_alloc_on_node(pgsize, numa_node); + if (!pg) + return NULL; + return page_to_addr(pg); +} + +/** + * page_alloc_addr - allocates a page address + * @pgsize: the size of the page + * + * Returns a pointer to page data, or NULL if an error occurred. 
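+ *
+ * Illustrative use, assuming the caller can tolerate a 2MB allocation
+ * (the variable name is hypothetical):
+ *
+ *   void *buf = page_alloc_addr(PGSIZE_2MB);
+ *   if (buf) {
+ *       ...
+ *       page_put_addr(buf);
+ *   }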
+ */ +void *page_alloc_addr(size_t pgsize) +{ + return page_alloc_addr_on_node(pgsize, current_numa_node()); +} + +/** + * page_zalloc_addr_on_node - allocates a zeroed page address for a NUMA node + * @pgsize: the size of the page + * @numa_node: the NUMA node the page is allocated from + * + * Returns a pointer to zeroed page data, or NULL if an error occurred. + */ +void *page_zalloc_addr_on_node(size_t pgsize, int numa_node) +{ + void *addr = page_alloc_addr_on_node(pgsize, numa_node); + if (addr) + memset(addr, 0, pgsize); + return addr; +} + +/** + * page_alloc_addr - allocates a page address + * @pgsize: the size of the page + * + * Returns a pointer to zeroed page data, or NULL if an error occurred. + */ +void *page_zalloc_addr(size_t pgsize) +{ + void *addr = page_alloc_addr(pgsize); + if (addr) + memset(addr, 0, pgsize); + return addr; +} + +/** + * page_put_addr - decrements underlying page's reference count + * @addr: a pointer to the page data + */ +void page_put_addr(void *addr) +{ + assert(is_page_addr(addr)); + page_put(addr_to_page(addr)); +} + +/** + * page_release - frees a page + * @kref: the embedded kref struct inside a page + */ +void page_release(struct kref *ref) +{ + struct page *pg = container_of(ref, struct page, ref); + size_t pgsize = page_to_size(pg); + page_free_check(pg, pgsize); + + switch (pgsize) { + case PGSIZE_4KB: + smpage_free(pg); + break; + case PGSIZE_2MB: + lgpage_free(pg); + break; + default: + /* unsupported page size */ + panic("page: tried to free an invalid page size %ld", pgsize); + } +} + +/** + * page_init - initializes the page subsystem + */ +int page_init(void) +{ + struct lgpage_node *node; + void *addr; + int i; + + /* First reserve address-space for the page table. */ + addr = mmap(NULL, LGPAGE_META_LEN * MAX_NUMA + PGSIZE_2MB - 1, PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (addr == MAP_FAILED) + return -ENOMEM; + + /* Align to the next 2MB boundary. */ + addr = (void *)align_up((uintptr_t)addr, PGSIZE_2MB); + + /* Then map NUMA-local large pages on top. */ + for (i = 0; i < MAX_NUMA; i++) { + node = &lgpage_nodes[i]; + node->tbl = mem_map_anom((char *)addr + i * LGPAGE_META_LEN, + LGPAGE_META_NR_LGPAGES * PGSIZE_2MB, PGSIZE_2MB, i); + if (node->tbl == MAP_FAILED) + return -ENOMEM; + + spin_lock_init(&node->lock); + list_head_init(&node->pages); + node->idx = 0; + } + + page_tbl = addr; + return 0; +} + +/** + * page_init_thread - initializes the page subsystem for a thread + */ +int page_init_percpu(void) +{ + tcache_init_percpu(smpage_tcache, &smpage_pt); + return 0; +} diff --git a/libos/mm/slab.c b/libos/mm/slab.c new file mode 100644 index 0000000..9d9e90e --- /dev/null +++ b/libos/mm/slab.c @@ -0,0 +1,508 @@ +/* + * slab.c - the SLAB memory allocator + * + * Note that we don't re-use initialized slab items like Solaris. Rather this + * implementation just applies a similar approach to memory management. See the + * following references for details: + * + * The Slab Allocator: An Object-Caching Kernel Memory Allocator + * Jeff Bonwick + * + * The SLAB allocator is designed for simplicity rather than for multicore + * scalability. When scalability is required use the thread-local cache on top + * of the SLAB allocator. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include + +/* embedded within free items. 
*/ +struct slab_hdr { + struct slab_hdr *next_hdr; +}; +BUILD_ASSERT(sizeof(struct slab_hdr) <= SLAB_MIN_SIZE); + +#define SLAB_PARTIAL_THRESH (SLAB_CHUNK_SIZE / 2) + +#define SLAB_4KB_LIMIT (align_down(PGSIZE_4KB / (SLAB_PARTIAL_THRESH * 2), __WORD_SIZE)) +#define SLAB_2MB_LIMIT (align_down(PGSIZE_2MB / (SLAB_PARTIAL_THRESH * 2), __WORD_SIZE)) + +static DEFINE_LIST_HEAD(slab_list); +static DEFINE_SPINLOCK(slab_lock); +static struct slab node_slab; + +struct slab smpage_slab; +struct tcache *smpage_tcache; +#define SMPAGE_MAG_SIZE 8 + +/* initialization bootstrapping */ +static struct slab_node early_slab_nodes[MAX_NUMA]; +static struct slab_node early_smpage_nodes[MAX_NUMA]; + +static void __slab_create_node(struct slab_node *n, int numa_node, size_t size, int offset, + int flags, int nr_elems) +{ + n->numa_node = numa_node; + n->size = size; + n->offset = offset; + n->flags = flags; + n->nr_elems = nr_elems; + + n->cur_pg = NULL; + n->pg_off = 0; + n->nr_pages = 0; + + spin_lock_init(&n->page_lock); + list_head_init(&n->full_list); + list_head_init(&n->partial_list); +} + +static void __slab_destroy_node(struct slab_node *n) +{ + struct page *pg, *pg_next; + + list_for_each_safe(&n->full_list, pg, pg_next, link) page_put(pg); + list_for_each_safe(&n->partial_list, pg, pg_next, link) page_put(pg); + if (n->cur_pg) + page_put(n->cur_pg); +} + +static int __slab_create(struct slab *s, const char *name, size_t size, int offset, int flags, + int nr_elems) +{ + struct slab_node *n; + int i; + + for (i = 0; i < MAX_NUMA; i++) { + n = (struct slab_node *)slab_alloc_on_node(&node_slab, i); + if (!n) + goto fail; + + __slab_create_node(n, i, size, offset, flags, nr_elems); + s->nodes[i] = n; + } + + spin_lock(&slab_lock); + list_add_tail(&slab_list, &s->link); + spin_unlock(&slab_lock); + s->name = name; + s->size = size; + return 0; + +fail: + for (i--; i >= 0; i--) slab_free(&node_slab, s->nodes[i]); + return -ENOMEM; +} + +static void __slab_early_create(struct slab *s, struct slab_node *nodes, const char *name, + size_t size, int offset, int flags, int nr_elems) +{ + int i; + + for (i = 0; i < MAX_NUMA; i++) { + __slab_create_node(&nodes[i], i, size, offset, flags, nr_elems); + s->nodes[i] = &nodes[i]; + } + + spin_lock(&slab_lock); + list_add(&slab_list, &s->link); + spin_unlock(&slab_lock); + s->name = name; + s->size = size; +} + +static int __slab_early_migrate(struct slab *s) +{ + struct slab_node *n; + int i; + + for (i = 0; i < MAX_NUMA; i++) { + n = (struct slab_node *)slab_alloc_on_node(&node_slab, i); + if (!n) + goto fail; + + memcpy(n, s->nodes[i], sizeof(*n)); + assert(list_empty(&s->nodes[i]->full_list)); + list_head_init(&n->full_list); + assert(list_empty(&s->nodes[i]->partial_list)); + list_head_init(&n->partial_list); + s->nodes[i] = n; + if (n->cur_pg) + n->cur_pg->snode = n; + } + + return 0; + +fail: + for (i--; i >= 0; i--) slab_free(&node_slab, s->nodes[i]); + return -ENOMEM; +} + +/** + * slab_create - creates a slab + * @s: the slab + * @name: a human readable name + * @size: the size of items + * @flag: flags + * + * Returns 0 if successful, otherwise fail. 
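+ *
+ * A minimal usage sketch (the slab name and item type are illustrative,
+ * not part of this change):
+ *
+ *   static struct slab conn_slab;
+ *   slab_create(&conn_slab, "conns", sizeof(struct conn), 0);
+ *   struct conn *c = slab_alloc_on_node(&conn_slab, current_numa_node());
+ *   ...
+ *   slab_free(&conn_slab, c);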
+ */ +int slab_create(struct slab *s, const char *name, size_t size, int flags) +{ + int pgsize; + + /* force cache line size alignment to prevent false sharing */ + if (!(flags & SLAB_FLAG_FALSE_OKAY)) + size = align_up(size, CACHE_LINE_SIZE); + else + size = align_up(size, __WORD_SIZE); + + if (size > SLAB_2MB_LIMIT) + return -E2BIG; + if (size > SLAB_4KB_LIMIT) + flags |= SLAB_FLAG_LGPAGE; + + pgsize = (flags & SLAB_FLAG_LGPAGE) ? PGSIZE_2MB : PGSIZE_4KB; + return __slab_create(s, name, size, 0, flags, pgsize / size); +} + +/** + * slab_destroy - destroys a slab + * @s: the slab + * + * WARNING: Frees all pages belonging to the slab, so unsafe + * to call until all references to the slab's items have been + * dropped. + */ +void slab_destroy(struct slab *s) +{ + int i; + + spin_lock(&slab_lock); + list_del(&s->link); + spin_unlock(&slab_lock); + + for (i = 0; i < MAX_NUMA; i++) { + __slab_destroy_node(s->nodes[i]); + slab_free(&node_slab, s->nodes[i]); + } +} + +#ifdef DEBUG + +static void slab_item_check(struct slab_node *n, void *item) +{ + struct page *pg; + + if (n->flags & SLAB_FLAG_LGPAGE) + pg = addr_to_lgpage(item); + else + pg = addr_to_smpage(item); + + /* alignment */ + if (n->flags & SLAB_FLAG_LGPAGE) + assert(PGOFF_2MB(item) % n->size == 0); + else + assert(PGOFF_4KB(item) % n->size == 0); + + if (unlikely(!thread_init_done)) + return; + + /* NUMA node checks */ + assert(n->numa_node == current_numa_node()); + assert(addr_to_numa_node(item) == current_numa_node()); + + /* page checks */ + assert(is_page_addr(item)); + assert(pg->flags & PAGE_FLAG_SLAB); + assert(pg->snode == n); +} + +void slab_alloc_check(struct slab_node *n, void *item) +{ + slab_item_check(n, item); + + /* poison the item */ + memset(item, 0xAB, n->size); +} + +void slab_free_check(struct slab_node *n, void *item) +{ + slab_item_check(n, item); + + /* poison the item */ + memset(item, 0xCD, n->size); +} + +#else /* DEBUG */ + +static void slab_alloc_check(struct slab_node *n, void *item) +{ + ; +} +static void slab_free_check(struct slab_node *n, void *item) +{ + ; +} + +#endif /* DEBUG */ + +static struct page *__slab_node_get_page(struct slab_node *n) +{ + struct page *pg = list_pop(&n->partial_list, struct page, link); + if (!pg) { + int pgsize = (n->flags & SLAB_FLAG_LGPAGE) ? PGSIZE_2MB : PGSIZE_4KB; + + pg = page_alloc_on_node(pgsize, n->numa_node); + if (likely(pg)) { + pg->flags |= PAGE_FLAG_SLAB; + if (n->flags & SLAB_FLAG_PAGES) { + pg->flags |= PAGE_FLAG_SHATTERED; + memset(page_to_addr(pg), 0, n->offset); + } + pg->snode = n; + n->pg_off = n->offset; + pg->item_count = n->nr_elems; + pg->next = NULL; + } + } + + return pg; +} + +static void *__slab_node_alloc(struct slab_node *n) +{ + struct slab_hdr *hdr; + + assert_spin_lock_held(&n->page_lock); + + if (!n->cur_pg || !n->cur_pg->item_count) { + if (n->cur_pg) + list_add(&n->full_list, &n->cur_pg->link); + n->cur_pg = __slab_node_get_page(n); + + /* ran out of memory */ + if (unlikely(!n->cur_pg)) + return NULL; + n->nr_pages++; + } + + if (n->cur_pg->next) { + hdr = (struct slab_hdr *)n->cur_pg->next; + n->cur_pg->next = (void *)hdr->next_hdr; + } else { + assert(n->pg_off < ((n->flags & SLAB_FLAG_LGPAGE) ? 
PGSIZE_2MB : PGSIZE_4KB)); + hdr = (struct slab_hdr *)((char *)page_to_addr(n->cur_pg) + n->pg_off); + n->pg_off += n->size; + } + + n->cur_pg->item_count--; + return (void *)hdr; +} + +/** + * slab_alloc_on_node - allocates an item from a slab + * @s: the slab + * @numa_node: the numa node + * + * Returns an item, or NULL if out of memory. + */ +void *slab_alloc_on_node(struct slab *s, int numa_node) +{ + struct slab_node *n = s->nodes[numa_node]; + void *item; + + spin_lock(&n->page_lock); + item = __slab_node_alloc(n); + spin_unlock(&n->page_lock); + + slab_alloc_check(n, item); + + return item; +} + +static void slab_node_free(struct slab_node *n, void *item) +{ + struct page *pg; + struct slab_hdr *hdr = (struct slab_hdr *)item; + bool free = false; + + if (n->flags & SLAB_FLAG_LGPAGE) + pg = addr_to_lgpage(item); + else + pg = addr_to_smpage(item); + + spin_lock(&n->page_lock); + hdr->next_hdr = pg->next; + pg->next = hdr; + pg->item_count++; + + if (pg == n->cur_pg) { + spin_unlock(&n->page_lock); + return; + } + + if (pg->item_count == SLAB_PARTIAL_THRESH) { + list_del(&pg->link); + list_add(&n->partial_list, &pg->link); + } else if (pg->item_count == n->nr_elems) { + list_del(&pg->link); + free = true; + } + spin_unlock(&n->page_lock); + + if (free) { + page_put(pg); + n->nr_pages--; + } +} + +/** + * slab_free frees an item to a slab + * @s: the slab + * @item: the item + */ +void slab_free(struct slab *s, void *item) +{ + struct slab_node *n = s->nodes[addr_to_numa_node(item)]; + slab_free_check(n, item); + slab_node_free(n, item); +} + +static int slab_tcache_alloc(struct tcache *tc, int nr, void **items) +{ + struct slab *s = (struct slab *)tc->data; + struct slab_node *n = s->nodes[current_numa_node()]; + int i; + + spin_lock(&n->page_lock); + for (i = 0; i < nr; i++) { + items[i] = __slab_node_alloc(n); + if (unlikely(!items[i])) { + spin_unlock(&n->page_lock); + goto fail; + } + } + spin_unlock(&n->page_lock); + + return 0; + +fail: + for (i--; i >= 0; i--) slab_node_free(n, items[i]); + return -ENOMEM; +} + +static void slab_tcache_free(struct tcache *tc, int nr, void **items) +{ + struct slab *s = (struct slab *)tc->data; + struct slab_node *n = s->nodes[current_numa_node()]; + int i; + + for (i = 0; i < nr; i++) slab_node_free(n, items[i]); +} + +static const struct tcache_ops slab_tcache_ops = { + .alloc = slab_tcache_alloc, + .free = slab_tcache_free, +}; + +/** + * slab_create_tcache - creates a thread-local cache of slab items + * @s: the backing slab + * @mag_size: the number of items in a magazine + * + * Returns a thread-local cache, or NULL if out of memory. 
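+ *
+ * The returned cache is only a frontend: callers attach a per-CPU handle
+ * with tcache_init_percpu() and then allocate through tcache_alloc() and
+ * tcache_free(), as the smpage and smalloc code in this patch does.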
+ */ +struct tcache *slab_create_tcache(struct slab *s, unsigned int mag_size) +{ + struct tcache *tc; + + tc = tcache_create(s->name, &slab_tcache_ops, mag_size, s->size); + tc->data = (unsigned long)s; + return tc; +} + +/** + * slab_print_usage - prints the amount of memory used in each slab + */ +void slab_print_usage(void) +{ + struct slab *s; + size_t total = 0; + int i; + + log_info("slab: usage statistics..."); + + spin_lock(&slab_lock); + list_for_each(&slab_list, s, link) + { + size_t usage = 0; + + for (i = 0; i < MAX_NUMA; i++) { + struct slab_node *n = s->nodes[i]; + + if (n->flags & SLAB_FLAG_LGPAGE) { + usage += n->nr_pages * PGSIZE_2MB; + total += n->nr_pages * PGSIZE_2MB; + } else { + usage += n->nr_pages * PGSIZE_4KB; + } + } + + log_info("%8ld KB\t%s", usage / 1024, s->name); + } + spin_unlock(&slab_lock); + + log_info("total: %ld KB", total / 1024); +} + +/** + * slab_init - initializes the slab subsystem + * + * NOTE: assumes that pages have already been initialized. + */ +int slab_init(void) +{ + int ret; + + /* + * The node and smpage slabs depend on each other so + * we bootstrap them here. + */ + __slab_early_create(&node_slab, early_slab_nodes, "slab_node", + align_up(sizeof(struct slab_node), TCACHE_MIN_ITEM_SIZE), 0, 0, + PGSIZE_4KB / sizeof(struct slab_node)); + + __slab_early_create(&smpage_slab, early_smpage_nodes, "smpage", PGSIZE_4KB, SMPAGE_META_LEN, + (SLAB_FLAG_LGPAGE | SLAB_FLAG_PAGES), + (PGSIZE_2MB - SMPAGE_META_LEN) / PGSIZE_4KB); + + /* + * And now we migrate them to data structures with + * the proper numa affinity. + */ + ret = __slab_early_migrate(&node_slab); + if (ret) + return ret; + + ret = __slab_early_migrate(&smpage_slab); + if (ret) + return ret; + + /* + * And then finally, create the thread-local cache + * for small pages. + */ + smpage_tcache = slab_create_tcache(&smpage_slab, SMPAGE_MAG_SIZE); + if (!smpage_tcache) + return -ENOMEM; + + return 0; +} diff --git a/libos/mm/smalloc.c b/libos/mm/smalloc.c new file mode 100644 index 0000000..180bfda --- /dev/null +++ b/libos/mm/smalloc.c @@ -0,0 +1,127 @@ +/* + * smalloc.c - a simple malloc implementation built on top of the base + * libary slab and thread-local cache allocator + */ + +#include + +#include +#include +#include +#include + +#define SMALLOC_MAG_SIZE 8 +#define SMALLOC_BITS 15 +#define SMALLOC_MIN_SIZE SLAB_MIN_SIZE +#define SMALLOC_MAX_SIZE (SMALLOC_MIN_SIZE << (SMALLOC_BITS - 1)) +BUILD_ASSERT(SMALLOC_MIN_SIZE >= SLAB_MIN_SIZE); + +static struct slab smalloc_slabs[SMALLOC_BITS]; +static struct tcache *smalloc_tcaches[SMALLOC_BITS]; +static DEFINE_PERCPU(struct tcache_percpu, smalloc_pts[SMALLOC_BITS]); + +/** + * smalloc_size_to_idx - converts a size to a cache index + * @size: the size of the item to allocate + * + * Returns the smalloc cache index. + */ +static inline int smalloc_size_to_idx(size_t size) +{ + return 64 - __builtin_ctz(SMALLOC_MIN_SIZE) - __builtin_clzl((size - 1) | SMALLOC_MIN_SIZE); +} + +static const char *slab_names[SMALLOC_BITS] = { + "smalloc (16 B)", "smalloc (32 B)", "smalloc (64 B)", "smalloc (128 B)", "smalloc (256 B)", + "smalloc (512 B)", "smalloc (1 KB)", "smalloc (2 KB)", "smalloc (4 KB)", "smalloc (8 KB)", + "smalloc (16 KB)", "smalloc (32 KB)", "smalloc (64 KB)", "smalloc (128 KB)", "smalloc (256 KB)", +}; + +/** + * smalloc - allocates memory (non-inlined path) + * @size: the size of the item + * + * Returns an item or NULL if out of memory. 
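+ *
+ * Illustrative pairing with sfree() (the buffer name is hypothetical):
+ *
+ *   char *buf = smalloc(100);   // backed by the smallest size class that fits
+ *   if (buf) {
+ *       ...
+ *       sfree(buf);
+ *   }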
+ */ +void *smalloc(size_t size) +{ + struct tcache_percpu *pt; + void *item; + + if (unlikely(size > SMALLOC_MAX_SIZE)) + return NULL; + + preempt_disable(); + pt = &percpu_get(smalloc_pts[smalloc_size_to_idx(size)]); + item = tcache_alloc(pt); + preempt_enable(); + + return item; +} + +/** + * __szmalloc - allocates zeroed memory (non-inlined path) + * @size: the size of the item + * + * Returns an item or NULL if out of memory. + */ +void *__szalloc(size_t size) +{ + void *item = smalloc(size); + if (unlikely(!item)) + return NULL; + + memset(item, 0, size); + return item; +} + +/* + * sfree - frees memory back to the generic allocator + * @item: the item to free + */ +void sfree(void *item) +{ + struct slab_node *n = addr_to_page(item)->snode; + struct tcache_percpu *pt; + + preempt_disable(); + pt = &percpu_get(smalloc_pts[smalloc_size_to_idx(n->size)]); + tcache_free(pt, item); + preempt_enable(); +} + +/** + * smalloc_init - initializes slab malloc + * + * NOTE: requires slab to be initialized first + */ +int smalloc_init(void) +{ + int i, ret; + + for (i = 0; i < SMALLOC_BITS; i++) { + ret = slab_create(&smalloc_slabs[i], slab_names[i], (SMALLOC_MIN_SIZE << i), + SLAB_FLAG_FALSE_OKAY); + if (ret) + return ret; + + smalloc_tcaches[i] = slab_create_tcache(&smalloc_slabs[i], SMALLOC_MAG_SIZE); + if (!smalloc_tcaches[i]) + return -ENOMEM; + } + + return 0; +} + +/** + * smalloc_init_percpu - initializes slab malloc (per-CPU) + */ +int smalloc_init_percpu(void) +{ + int i; + + for (i = 0; i < SMALLOC_BITS; i++) + tcache_init_percpu(smalloc_tcaches[i], &percpu_get(smalloc_pts[i])); + + return 0; +} diff --git a/libos/mm/stack.c b/libos/mm/stack.c new file mode 100644 index 0000000..fa4205e --- /dev/null +++ b/libos/mm/stack.c @@ -0,0 +1,113 @@ +/* + * stack.c - allocates and manages per-thread stacks + */ + +#include +#include + +#include +#include +#include +#include +#include + +#define STACK_BASE_ADDR 0x200000000000UL + +static struct tcache *stack_tcache; +DEFINE_PERCPU(struct tcache_percpu, stack_percpu); + +static struct stack *stack_create(void *base) +{ + void *stack_addr; + struct stack *s; + + stack_addr = mmap(base, sizeof(struct stack), PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (stack_addr == MAP_FAILED) + return NULL; + + s = (struct stack *)stack_addr; + + return s; +} + +/* WARNING: the contents of the stack may be lost after reclaiming. 
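+ * madvise(MADV_DONTNEED) below returns the physical pages to the kernel while
+ * keeping the mapping itself valid, so a recycled stack is refaulted as
+ * zero-filled pages on its next use.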
*/ +static void stack_reclaim(struct stack *s) +{ + int ret; + ret = madvise(s->payload, RUNTIME_STACK_SIZE, MADV_DONTNEED); + WARN_ON_ONCE(ret); +} + +static DEFINE_SPINLOCK(stack_lock); +static int free_stack_count; +static struct stack *free_stacks[MAX_TASKS]; +static atomic_long stack_pos = STACK_BASE_ADDR; + +static void stack_tcache_free(struct tcache *tc, int nr, void **items) +{ + int i; + + /* try to release the backing memory first */ + for (i = 0; i < nr; i++) stack_reclaim((struct stack *)items[i]); + + /* then make the stacks available for reallocation */ + spin_lock(&stack_lock); + for (i = 0; i < nr; i++) free_stacks[free_stack_count++] = items[i]; + BUG_ON(free_stack_count >= MAX_TASKS + TCACHE_DEFAULT_MAG_SIZE); + spin_unlock(&stack_lock); +} + +static int stack_tcache_alloc(struct tcache *tc, int nr, void **items) +{ + void *base; + int i = 0; + + spin_lock(&stack_lock); + while (free_stack_count && i < nr) { + items[i++] = free_stacks[--free_stack_count]; + } + spin_unlock(&stack_lock); + + for (; i < nr; i++) { + base = (void *)atomic_fetch_add(&stack_pos, sizeof(struct stack)); + items[i] = stack_create(base); + if (unlikely(!items[i])) + goto fail; + } + + return 0; + +fail: + log_err("stack: failed to allocate stack memory"); + stack_tcache_free(tc, i, items); + return -ENOMEM; +} + +static const struct tcache_ops stack_tcache_ops = { + .alloc = stack_tcache_alloc, + .free = stack_tcache_free, +}; + +/** + * stack_init_thread - intializes per-thread state + * Returns 0 (always successful). + */ +int stack_init_percpu(void) +{ + tcache_init_percpu(stack_tcache, &percpu_get(stack_percpu)); + return 0; +} + +/** + * stack_init - initializes the stack allocator + * Returns 0 if successful, or -ENOMEM if out of memory. + */ +int stack_init(void) +{ + stack_tcache = tcache_create("runtime_stacks", &stack_tcache_ops, TCACHE_DEFAULT_MAG_SIZE, + sizeof(struct stack)); + if (!stack_tcache) + return -ENOMEM; + return 0; +} diff --git a/libos/mm/tcache.c b/libos/mm/tcache.c new file mode 100644 index 0000000..6f48adc --- /dev/null +++ b/libos/mm/tcache.c @@ -0,0 +1,240 @@ +/* + * tcache.c - a generic thread-local item cache + * + * Based heavily on Magazines and Vmem: Extending the Slab Allocator to Many + * CPUs and Arbitrary Resources. Jeff Bonwick and Johnathan Adams. + * + * TODO: Improve NUMA awareness. + * TODO: Provide an interface to tear-down thread caches. + * TODO: Remove dependence on libc malloc(). + * TODO: Use RCU for tcache list so printing stats doesn't block creating + * new tcaches. 
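+ *
+ * In short: every CPU keeps a "loaded" and a "previous" magazine of items.
+ * tcache_alloc()/tcache_free() normally touch only those, and the slow paths
+ * below swap full or empty magazines with a shared, spinlock-protected pool,
+ * falling back to the backing allocator through the tcache_ops callbacks.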
+ */ + +#include + +#include +#include +#include +#include + +#include + +static DEFINE_SPINLOCK(tcache_lock); +static DEFINE_LIST_HEAD(tcache_list); + +DEFINE_PERCPU(uint64_t, mag_alloc); +DEFINE_PERCPU(uint64_t, mag_free); +DEFINE_PERCPU(uint64_t, pool_alloc); +DEFINE_PERCPU(uint64_t, pool_free); + +static struct tcache_hdr *tcache_alloc_mag(struct tcache *tc) +{ + void *items[TCACHE_MAX_MAG_SIZE]; + struct tcache_hdr *head, **pos; + int err, i; + + percpu_get(mag_alloc)++; + + err = tc->ops->alloc(tc, tc->mag_size, items); + if (err) + return NULL; + + head = (struct tcache_hdr *)items[0]; + pos = &head->next_item; + for (i = 1; i < (int)tc->mag_size; i++) { + *pos = (struct tcache_hdr *)items[i]; + pos = &(*pos)->next_item; + } + + *pos = NULL; + atomic_inc(&tc->mags_allocated); + return head; +} + +static void tcache_free_mag(struct tcache *tc, struct tcache_hdr *hdr) +{ + void *items[TCACHE_MAX_MAG_SIZE]; + int nr = 0; + + percpu_get(mag_free)++; + + do { + items[nr++] = hdr; + hdr = hdr->next_item; + } while (hdr); + + assert(nr == (int)tc->mag_size); + tc->ops->free(tc, nr, items); + atomic_dec(&tc->mags_allocated); +} + +/* The thread-local cache allocation slow path. */ +void *__tcache_alloc(struct tcache_percpu *ltc) +{ + struct tcache *tc = ltc->tc; + void *item; + + /* must be out of rounds */ + assert(ltc->rounds == 0); + assert(ltc->loaded == NULL); + + /* CASE 1: exchange empty loaded mag with full previous mag */ + if (ltc->previous) { + ltc->loaded = ltc->previous; + ltc->previous = NULL; + goto alloc; + } + + percpu_get(pool_alloc)++; + + /* CASE 2: grab a magazine from the shared pool */ + spin_lock(&tc->lock); + ltc->loaded = tc->shared_mags; + if (tc->shared_mags) + tc->shared_mags = tc->shared_mags->next_mag; + spin_unlock(&tc->lock); + if (ltc->loaded) + goto alloc; + + /* CASE 3: allocate a new magazine */ + ltc->loaded = tcache_alloc_mag(tc); + if (unlikely(!ltc->loaded)) + return NULL; + +alloc: + /* reload the magazine and allocate an item */ + ltc->rounds = ltc->capacity - 1; + item = (void *)ltc->loaded; + ltc->loaded = ltc->loaded->next_item; + return item; +} + +/* The thread-local cache free slow path. */ +void __tcache_free(struct tcache_percpu *ltc, void *item) +{ + struct tcache *tc = ltc->tc; + struct tcache_hdr *hdr = (struct tcache_hdr *)item; + + /* magazine must be full */ + assert(ltc->rounds == ltc->capacity); + assert(ltc->loaded != NULL); + + /* CASE 1: exchange empty previous mag with full loaded mag */ + if (!ltc->previous) { + ltc->previous = ltc->loaded; + goto free; + } + + percpu_get(pool_free)++; + + /* CASE 2: return a magazine to the shared pool */ + spin_lock(&tc->lock); + ltc->previous->next_mag = tc->shared_mags; + tc->shared_mags = ltc->previous; + spin_unlock(&tc->lock); + ltc->previous = ltc->loaded; + +free: + /* start a new magazine and free the item */ + ltc->rounds = 1; + ltc->loaded = hdr; + hdr->next_item = NULL; +} + +/** + * tcache_create - creates a new thread-local cache + * @name: a human-readable name to identify the cache + * @ops: operations for allocating and freeing items that back the cache + * @mag_size: the number of items in a magazine + * @item_size: the size of each item + * + * Returns a thread cache or NULL of out of memory. + * + * After creating a thread-local cache, you'll want to attach one or more + * thread-local handles using tcache_init_percpu(). 
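+ *
+ * Sketch of the expected sequence (the ops, sizes, and names here are
+ * illustrative, not part of this patch):
+ *
+ *   static DEFINE_PERCPU(struct tcache_percpu, my_pt);
+ *
+ *   struct tcache *tc = tcache_create("bufs", &my_ops, 8, 64);
+ *   tcache_init_percpu(tc, &percpu_get(my_pt));
+ *   void *item = tcache_alloc(&percpu_get(my_pt));
+ *   tcache_free(&percpu_get(my_pt), item);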
+ */ +struct tcache *tcache_create(const char *name, const struct tcache_ops *ops, unsigned int mag_size, + size_t item_size) +{ + struct tcache *tc; + + /* we assume the caller is aware of the tcache size limits */ + assert(item_size >= TCACHE_MIN_ITEM_SIZE); + assert(mag_size <= TCACHE_MAX_MAG_SIZE); + + tc = malloc(sizeof(*tc)); + if (!tc) + return NULL; + + tc->name = name; + tc->ops = ops; + tc->item_size = item_size; + atomic_store(&tc->mags_allocated, 0); + tc->mag_size = mag_size; + spin_lock_init(&tc->lock); + tc->shared_mags = NULL; + + spin_lock(&tcache_lock); + list_add_tail(&tcache_list, &tc->link); + spin_unlock(&tcache_lock); + + return tc; +} + +/** + * tcache_init_percpu - intializes a per-cpu handle for a thread-local + * cache + * @tc: the thread-local cache + * @ltc: the per-thread handle + */ +void tcache_init_percpu(struct tcache *tc, struct tcache_percpu *ltc) +{ + ltc->tc = tc; + ltc->loaded = ltc->previous = NULL; + ltc->rounds = 0; + ltc->capacity = tc->mag_size; +} + +/** + * tcache_reclaim - reclaims unused memory from a thread-local cache + * @tc: the thread-local cache + */ +void tcache_reclaim(struct tcache *tc) +{ + struct tcache_hdr *hdr, *next; + + spin_lock(&tc->lock); + hdr = tc->shared_mags; + tc->shared_mags = NULL; + spin_unlock(&tc->lock); + + while (hdr) { + next = hdr->next_mag; + tcache_free_mag(tc, hdr); + hdr = next; + } +} + +/** + * tcache_print_stats - dumps usage statistics about all thread-local caches + */ +void tcache_print_usage(void) +{ + struct tcache *tc; + size_t total = 0; + + log_info("tcache: dumping usage statistics..."); + + spin_lock(&tcache_lock); + list_for_each(&tcache_list, tc, link) + { + long mags = atomic_load(&tc->mags_allocated); + size_t usage = tc->mag_size * tc->item_size * mags; + log_info("%8ld KB\t%s", usage / 1024, tc->name); + total += usage; + } + spin_unlock(&tcache_lock); + + log_info("total: %8ld KB", total / 1024); +} diff --git a/libos/net/arp.c b/libos/net/arp.c new file mode 100644 index 0000000..d394915 --- /dev/null +++ b/libos/net/arp.c @@ -0,0 +1,396 @@ +/* + * arp.c - support for address resolution protocol (ARP) + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ARP_SEED 0xD4812A53 +#define ARP_TABLE_CAPACITY 1024 +#define ARP_RETRIES 3 +#define ARP_RETRY_TIME USEC_PER_SEC +#define ARP_REPROBE_TIME (10 * USEC_PER_SEC) + +enum { + /* the MAC address is being probed */ + ARP_STATE_PROBING = 0, + /* the MAC address is valid */ + ARP_STATE_VALID, + /* the MAC address is probably valid but is being confirmed */ + ARP_STATE_VALID_BUT_REPROBING, + /* Statically configured arp entry */ + ARP_STATE_STATIC, +}; + +/* A single entry in the ARP table. 
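+ * Entries begin in ARP_STATE_PROBING and move to ARP_STATE_VALID once a reply
+ * arrives; stale entries are reprobed or dropped by arp_worker(). Entries
+ * loaded from the static configuration never expire.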
*/ +struct arp_entry { + /* accessed by RCU sections */ + uint32_t state; + uint32_t ip; + struct eth_addr eth; + struct rcu_hlist_node link; + + /* accessed only with @arp_lock */ + struct mbufq q; + struct rcu_head rcuh; + uint64_t ts; + int tries_left; + int pad; +}; + +static DEFINE_SPINLOCK(arp_lock); +static struct rcu_hlist_head arp_tbl[ARP_TABLE_CAPACITY]; + +int arp_static_count; +struct arp_static_entry static_entries[MAX_ARP_STATIC_ENTRIES]; + +static void arp_worker(void *arg); + +static inline int hash_ip(uint32_t ip) +{ + return hash_crc32c_one(ARP_SEED, ip) % ARP_TABLE_CAPACITY; +} + +static struct arp_entry *lookup_entry(int idx, uint32_t daddr) +{ + struct arp_entry *e; + struct rcu_hlist_node *node; + + rcu_hlist_for_each(&arp_tbl[idx], node, true) + { + e = rcu_hlist_entry(node, struct arp_entry, link); + if (e->ip == daddr) + return e; + } + + return NULL; +} + +static void release_entry(struct rcu_head *h) +{ + struct arp_entry *e = container_of(h, struct arp_entry, rcuh); + sfree(e); +} + +static void delete_entry(struct arp_entry *e) +{ + rcu_hlist_del(&e->link); + + /* free any mbufs waiting for an ARP response */ + while (!mbufq_empty(&e->q)) { + struct mbuf *m = mbufq_pop_head(&e->q); + net_error(m, EHOSTUNREACH); + mbuf_free(m); + } + + rcu_free(&e->rcuh, release_entry); +} + +static void insert_entry(struct arp_entry *e, int idx) +{ + static bool worker_running; + + rcu_hlist_add_head(&arp_tbl[idx], &e->link); + + if (unlikely(!worker_running && e->state != ARP_STATE_STATIC)) { + worker_running = true; + BUG_ON(sl_task_spawn(arp_worker, NULL, 0)); + } +} + +static struct arp_entry *create_entry(uint32_t daddr) +{ + struct arp_entry *e = smalloc(sizeof(*e)); + if (!e) + return NULL; + + e->ip = daddr; + e->state = ARP_STATE_PROBING; + e->ts = now_us(); + e->tries_left = ARP_RETRIES; + mbufq_init(&e->q); + return e; +} + +void arp_send(uint16_t op, struct eth_addr dhost, uint32_t daddr) +{ + struct mbuf *m; + struct arp_hdr *arp_hdr; + struct arp_hdr_ethip *arp_hdr_ethip; + + m = net_tx_alloc_mbuf(); + if (unlikely(!m)) + return; + + arp_hdr = mbuf_put_hdr(m, *arp_hdr); + arp_hdr->htype = hton16(ARP_HTYPE_ETHER); + arp_hdr->ptype = hton16(ETHTYPE_IP); + arp_hdr->hlen = sizeof(struct eth_addr); + arp_hdr->plen = sizeof(uint32_t); + arp_hdr->op = hton16(op); + + arp_hdr_ethip = mbuf_put_hdr(m, *arp_hdr_ethip); + arp_hdr_ethip->sender_mac = io->mac; + arp_hdr_ethip->sender_ip = hton32(io->addr); + arp_hdr_ethip->target_mac = dhost; + arp_hdr_ethip->target_ip = hton32(daddr); + + net_tx_eth_or_free(m, ETHTYPE_ARP, dhost); +} + +static void arp_age_entry(uint64_t us, struct arp_entry *e) +{ + /* check if this entry has timed out */ + if (us - e->ts < ((e->state == ARP_STATE_VALID) ? 
ARP_REPROBE_TIME : ARP_RETRY_TIME)) + return; + + switch (e->state) { + case ARP_STATE_PROBING: + case ARP_STATE_VALID_BUT_REPROBING: + if (e->tries_left == 0) { + delete_entry(e); + return; + } + e->tries_left--; + break; + + case ARP_STATE_VALID: + e->state = ARP_STATE_VALID_BUT_REPROBING; + e->tries_left = ARP_RETRIES; + break; + + case ARP_STATE_STATIC: + return; + + default: + panic("arp: invalid entry state %d", e->state); + } + + arp_send(ARP_OP_REQUEST, eth_addr_broadcast, e->ip); + e->ts = now_us(); +} + +static void arp_worker(void *arg) +{ + struct arp_entry *e; + struct rcu_hlist_node *node; + uint64_t us; + int i; + + /* wake up each second and update the ARP table */ + while (true) { + us = now_us(); + + for (i = 0; i < ARP_TABLE_CAPACITY; i++) { + spin_lock_np(&arp_lock); + rcu_hlist_for_each(&arp_tbl[i], node, true) + { + e = rcu_hlist_entry(node, struct arp_entry, link); + arp_age_entry(us, e); + } + spin_unlock_np(&arp_lock); + } + + timer_sleep(USEC_PER_SEC); + } +} + +static void arp_update(uint32_t daddr, struct eth_addr dhost) +{ + struct mbufq q; + int idx = hash_ip(daddr); + struct arp_entry *e; + + mbufq_init(&q); + + spin_lock_np(&arp_lock); + e = lookup_entry(idx, daddr); + if (!e) { + e = create_entry(daddr); + if (unlikely(!e)) { + spin_unlock_np(&arp_lock); + return; + } + + insert_entry(e, idx); + } else if (atomic_load_acq(&e->state) == ARP_STATE_STATIC) { + spin_unlock_np(&arp_lock); + return; + } + e->eth = dhost; + e->ts = now_us(); + atomic_store_rel(&e->state, ARP_STATE_VALID); + mbufq_merge_to_tail(&q, &e->q); + spin_unlock_np(&arp_lock); + + /* drain mbufs waiting for ARP response */ + while (!mbufq_empty(&q)) { + struct mbuf *m = mbufq_pop_head(&q); + net_tx_eth_or_free(m, ETHTYPE_IP, dhost); + } +} + +/** + * net_rx_arp - receive an ARP packet + * @m: the mbuf containing the ARP packet (eth hdr is stripped) + */ +void net_rx_arp(struct mbuf *m) +{ + uint16_t op; + bool am_target; + struct arp_hdr *arp_hdr; + struct arp_hdr_ethip *arp_hdr_ethip; + uint32_t sender_ip, target_ip; + struct eth_addr sender_mac; + + arp_hdr = mbuf_pull_hdr_or_null(m, *arp_hdr); + arp_hdr_ethip = mbuf_pull_hdr_or_null(m, *arp_hdr_ethip); + if (!arp_hdr || !arp_hdr_ethip) + goto out; + + /* make sure the arp header is valid */ + if (ntoh16(arp_hdr->htype) != ARP_HTYPE_ETHER || ntoh16(arp_hdr->ptype) != ETHTYPE_IP || + arp_hdr->hlen != sizeof(struct eth_addr) || arp_hdr->plen != sizeof(uint32_t)) + goto out; + + op = ntoh16(arp_hdr->op); + sender_ip = ntoh32(arp_hdr_ethip->sender_ip); + target_ip = ntoh32(arp_hdr_ethip->target_ip); + sender_mac = arp_hdr_ethip->sender_mac; + + /* refuse ARP packets with multicast source MAC's */ + if (eth_addr_is_multicast(&sender_mac)) + goto out; + + am_target = (io->addr == target_ip); + arp_update(sender_ip, sender_mac); + + if (am_target && op == ARP_OP_REQUEST) { + log_debug("arp: responding to arp request " + "from IP %d.%d.%d.%d " + "MAC %02X:%02X:%02X:%02X:%02X:%02X", + ((sender_ip >> 24) & 0xff), ((sender_ip >> 16) & 0xff), ((sender_ip >> 8) & 0xff), + (sender_ip & 0xff), sender_mac.addr[0], sender_mac.addr[1], sender_mac.addr[2], + sender_mac.addr[3], sender_mac.addr[4], sender_mac.addr[5]); + + arp_send(ARP_OP_REPLY, sender_mac, sender_ip); + } + +out: + mbuf_free(m); +} + +/** + * arp_lookup - retrieve a MAC address for a given IP address + * @daddr: the target IP address + * @dhost_out: A buffer to store the MAC address + * @m: the mbuf requiring the lookup (can be NULL, otherwise must start with + * a network header (L3)) + * 
+ * Returns 0 and writes to @dhost_out if successful. Otherwise returns: + * -ENOMEM: If out of memory + * -EINPROGRESS: If the ARP request is still resolving. Takes ownership of @m. + */ +int arp_lookup(uint32_t daddr, struct eth_addr *dhost_out, struct mbuf *m) +{ + struct arp_entry *e, *newe = NULL; + int idx = hash_ip(daddr); + + /* hot-path: @daddr hits in ARP cache */ + rcu_read_lock(); + e = lookup_entry(idx, daddr); + if (likely(e && atomic_load_acq(&e->state) != ARP_STATE_PROBING)) { + *dhost_out = e->eth; + rcu_read_unlock(); + return 0; + } + rcu_read_unlock(); + + /* cold-path: solicit an ARP response */ + if (!e) { + arp_send(ARP_OP_REQUEST, eth_addr_broadcast, daddr); + newe = create_entry(daddr); + if (!newe) + return -ENOMEM; + } + + /* check again for @daddr in ARP cache; we own @m going forward */ + spin_lock_np(&arp_lock); + e = lookup_entry(idx, daddr); + if (e) { + /* entry already exists */ + if (newe) + sfree(newe); + if (e->state != ARP_STATE_PROBING) { + *dhost_out = e->eth; + spin_unlock_np(&arp_lock); + return 0; + } + } else if (newe) { + /* insert new entry */ + e = newe; + insert_entry(e, idx); + } + + /* enqueue the mbuf for later transmission */ + if (m && e) + mbufq_push_tail(&e->q, m); + spin_unlock_np(&arp_lock); + + /* if the entry was removed, assume unreachable and free */ + if (m && !e) + mbuf_free(m); + + return -EINPROGRESS; +} + +/** + * arp_init - initializes the ARP subsystem + * + * Always returns 0 for success. + */ +int arp_init(void) +{ + int i; + + spin_lock_init(&arp_lock); + for (i = 0; i < ARP_TABLE_CAPACITY; i++) rcu_hlist_init_head(&arp_tbl[i]); + + return 0; +} + +/** + * arp_init_late - starts the ARP worker thread + * + * Returns 0 if successful. + */ +int arp_init_late(void) +{ + int i, idx; + struct arp_entry *e; + + spin_lock_np(&arp_lock); + + for (i = 0; i < arp_static_count; i++) { + e = create_entry(static_entries[i].ip); + if (!e) + return -ENOMEM; + idx = hash_ip(static_entries[i].ip); + e->eth = static_entries[i].addr; + e->state = ARP_STATE_STATIC; + insert_entry(e, idx); + } + + spin_unlock_np(&arp_lock); + + return 0; +} diff --git a/libos/net/core.c b/libos/net/core.c new file mode 100644 index 0000000..5f82d26 --- /dev/null +++ b/libos/net/core.c @@ -0,0 +1,526 @@ +/* + * core.c - core networking infrastructure + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define IP_ID_SEED 0x42345323 +#define RX_PREFETCH_STRIDE 2 + +/* RX buffer allocation */ +static struct slab net_rx_buf_slab; +static struct tcache *net_rx_buf_tcache; +static DEFINE_PERCPU(struct tcache_percpu, net_rx_buf_percpu); + +/* TX buffer allocation */ +static struct mempool net_tx_buf_mp; +static struct tcache *net_tx_buf_tcache; +static DEFINE_PERCPU(struct tcache_percpu, net_tx_buf_percpu); + +#define MBUF_RESERVED (align_up(sizeof(struct mbuf), CACHE_LINE_SIZE)) + +/* + * RX Networking Functions + */ + +static void net_rx_release_mbuf(struct mbuf *m) +{ + preempt_disable(); + tcache_free(&percpu_get(net_rx_buf_percpu), m); + preempt_enable(); +} + +static void net_rx_send_completion(uint64_t completion_data) +{ + struct kthread *k; + + k = getk(); + if (unlikely(!lrpc_send(&k->txcmdq, TXCMD_NET_COMPLETE, completion_data))) { + WARN(); + } + putk(); +} + +static struct mbuf *net_rx_alloc_mbuf(struct rx_net_hdr *hdr) +{ + struct mbuf *m; + void *buf; + + preempt_disable(); + /* allocate the buffer to store the payload */ + m = tcache_alloc(&percpu_get(net_rx_buf_percpu)); + if (unlikely(!m)) { + 
preempt_enable(); + goto fail_buf; + } + + preempt_enable(); + + buf = (unsigned char *)m + MBUF_RESERVED; + + /* copy the payload and release the buffer back to the iokernel */ + memcpy(buf, hdr->payload, hdr->len); + + mbuf_init(m, buf, MBUF_DEFAULT_LEN - MBUF_RESERVED, 0); + m->len = hdr->len; + m->csum_type = hdr->csum_type; + m->csum = hdr->csum; + m->rss_hash = hdr->rss_hash; + + barrier(); + net_rx_send_completion(hdr->completion_data); + + m->release_data = 0; + m->release = net_rx_release_mbuf; + return m; + +fail_buf: + net_rx_send_completion(hdr->completion_data); + return NULL; +} + +static inline bool ip_hdr_supported(const struct ip_hdr *iphdr) +{ + /* must be IPv4, no IP options, no IP fragments */ + return (iphdr->version == IPVERSION && iphdr->header_len == sizeof(*iphdr) / sizeof(uint32_t) && + (iphdr->off & IP_MF) == 0); +} + +/** + * net_error - reports a network error so that it can be passed to higher layers + * @m: the egress mbuf that triggered the error + * @err: the suggested error code to report + * + * The mbuf data pointer must point to the network-layer (L3) hdr that failed. + */ +void net_error(struct mbuf *m, int err) +{ + const struct ip_hdr *iphdr; + + iphdr = mbuf_pull_hdr_or_null(m, *iphdr); + if (unlikely(!iphdr)) + return; + if (unlikely(!ip_hdr_supported(iphdr))) + return; + + /* don't check length because ICMP may not provide the full payload */ + + /* so far we only support error handling in UDP and TCP */ + if (iphdr->proto == IPPROTO_UDP || iphdr->proto == IPPROTO_TCP) + trans_error(m, err); +} + +static struct mbuf *net_rx_one(struct rx_net_hdr *hdr) +{ + struct mbuf *m; + const struct eth_hdr *llhdr; + const struct ip_hdr *iphdr; + uint16_t len; + + m = net_rx_alloc_mbuf(hdr); + if (unlikely(!m)) + return NULL; + + ADD_STAT(RX, 1); + + /* + * Link Layer Processing (OSI L2) + */ + + llhdr = mbuf_pull_hdr_or_null(m, *llhdr); + if (unlikely(!llhdr)) + goto drop; + + /* handle ARP requests */ + if (ntoh16(llhdr->type) == ETHTYPE_ARP) { + net_rx_arp(m); + return NULL; + } + + /* filter out requests we can't handle */ + BUILD_ASSERT(sizeof(llhdr->dhost.addr) == sizeof(io->mac.addr)); + if (unlikely(ntoh16(llhdr->type) != ETHTYPE_IP || + memcmp(llhdr->dhost.addr, io->mac.addr, sizeof(llhdr->dhost.addr)) != 0)) + goto drop; + + /* + * Network Layer Processing (OSI L3) + */ + + mbuf_mark_network_offset(m); + iphdr = mbuf_pull_hdr_or_null(m, *iphdr); + if (unlikely(!iphdr)) + goto drop; + + /* Did HW checksum verification pass? 
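+       If not, validate the IPv4 header checksum in software and drop the
+       packet when the check fails.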
*/ + if (hdr->csum_type != CHECKSUM_TYPE_UNNECESSARY) { + if (cksum_internet(iphdr, sizeof(*iphdr))) + goto drop; + } + + if (unlikely(!ip_hdr_supported(iphdr))) + goto drop; + + len = ntoh16(iphdr->len) - sizeof(*iphdr); + if (unlikely(mbuf_length(m) < len)) + goto drop; + if (len < mbuf_length(m)) + mbuf_trim(m, mbuf_length(m) - len); + + switch (iphdr->proto) { + case IPPROTO_ICMP: + net_rx_icmp(m, iphdr, len); + break; + + case IPPROTO_UDP: + case IPPROTO_TCP: + return m; + + default: + goto drop; + } + + return NULL; + +drop: + mbuf_free(m); + return NULL; +} + +/** + * net_rx_softirq - handles ingress packet processing + * @hdrs: an array of ingress packet headers + * @nr: the size of the @hdrs array + */ +void net_rx_softirq(struct rx_net_hdr **hdrs, int nr) +{ + struct mbuf *l4_reqs[SOFTIRQ_MAX_BUDGET]; + int i, l4idx = 0; + + for (i = 0; i < nr; i++) { + if (i + RX_PREFETCH_STRIDE < nr) + prefetch(hdrs[i + RX_PREFETCH_STRIDE]); + l4_reqs[l4idx] = net_rx_one(hdrs[i]); + if (l4_reqs[l4idx] != NULL) + l4idx++; + } + + /* handle transport protocol layer */ + if (l4idx > 0) + net_rx_trans(l4_reqs, l4idx); +} + +/* + * TX Networking Functions + */ + +/** + * net_tx_release_mbuf - the default TX mbuf release handler + * @m: the mbuf to free + * + * Normally, this handler will get called automatically. If you override + * mbuf.release(), call this method manually. + */ +void net_tx_release_mbuf(struct mbuf *m) +{ + preempt_disable(); + tcache_free(&percpu_get(net_tx_buf_percpu), m); + preempt_enable(); +} + +/** + * net_tx_alloc_mbuf - allocates an mbuf for transmitting. + * + * Returns an mbuf, or NULL if out of memory. + */ +struct mbuf *net_tx_alloc_mbuf(void) +{ + struct mbuf *m; + unsigned char *buf; + + preempt_disable(); + m = tcache_alloc(&percpu_get(net_tx_buf_percpu)); + if (unlikely(!m)) { + preempt_enable(); + log_warn("net: out of tx buffers"); + return NULL; + } + + preempt_enable(); + + buf = (unsigned char *)m + MBUF_RESERVED; + + mbuf_init(m, buf, MBUF_DEFAULT_LEN - MBUF_RESERVED, MBUF_DEFAULT_HEADROOM); + m->csum_type = CHECKSUM_TYPE_NEEDED; + m->txflags = 0; + m->release_data = 0; + m->release = net_tx_release_mbuf; + return m; +} + +/* drains overflow queues */ +void __noinline net_tx_drain_overflow(void) +{ + struct mbuf *m; + struct kthread *k = thisk(); + + assert_preempt_disabled(); + + /* drain TX packets */ + while (!mbufq_empty(&k->txpktq_overflow)) { + m = mbufq_peak_head(&k->txpktq_overflow); + if (!lrpc_send(&k->txpktq, TXPKT_NET_XMIT, (uint64_t)mbuf_data(m))) + break; + mbufq_pop_head(&k->txpktq_overflow); + } +} + +static void net_tx_raw(struct mbuf *m) +{ + struct kthread *k; + struct tx_net_hdr *hdr; + unsigned int len = mbuf_length(m); + + ADD_STAT(TX, 1); + + k = getk(); + /* drain pending overflow packets first */ + if (!mbufq_empty(&k->txpktq_overflow)) + net_tx_drain_overflow(); + + hdr = mbuf_push_hdr(m, *hdr); + hdr->completion_data = (unsigned long)m; + hdr->len = len; + hdr->olflags = m->txflags; + + if (!lrpc_send(&k->txpktq, TXPKT_NET_XMIT, (uint64_t)hdr)) { + mbufq_push_tail(&k->txpktq_overflow, m); + } + putk(); +} + +/** + * net_tx_eth - transmits an ethernet packet + * @m: the mbuf to transmit + * @type: the ethernet type (in native byte order) + * @dhost: the destination MAC address + * + * The payload must start with the network (L3) header. The ethernet (L2) + * header will be prepended by this function. + * + * @m must have been allocated with net_tx_alloc_mbuf(). + * + * Returns 0 if successful. 
If successful, the mbuf will be freed when the + * transmit completes. Otherwise, the mbuf still belongs to the caller. + */ +int net_tx_eth(struct mbuf *m, uint16_t type, struct eth_addr dhost) +{ + struct eth_hdr *eth_hdr; + + eth_hdr = mbuf_push_hdr(m, *eth_hdr); + eth_hdr->shost = io->mac; + eth_hdr->dhost = dhost; + eth_hdr->type = hton16(type); + net_tx_raw(m); + return 0; +} + +static void net_push_iphdr(struct mbuf *m, uint8_t proto, uint32_t daddr) +{ + struct ip_hdr *iphdr; + + /* TODO: Support "don't fragment" (DF) flag? */ + + /* populate IP header */ + iphdr = mbuf_push_hdr(m, *iphdr); + iphdr->version = IPVERSION; + iphdr->header_len = 5; + iphdr->tos = IPTOS_DSCP_CS0 | IPTOS_ECN_NOTECT; + iphdr->len = hton16(mbuf_length(m)); + /* This must be unique across datagrams within a flow, see RFC 6864 */ + iphdr->id = hash_crc32c_two(IP_ID_SEED, now_tsc() ^ proto, + (uint64_t)daddr | ((uint64_t)io->addr << 32)); + iphdr->off = 0; + iphdr->ttl = 64; + iphdr->proto = proto; + iphdr->cksum = 0; + iphdr->saddr = hton32(io->addr); + iphdr->daddr = hton32(daddr); +} + +static uint32_t net_get_ip_route(uint32_t daddr) +{ + /* simple IP routing */ + if ((daddr & io->netmask) != (io->addr & io->netmask)) + daddr = io->gateway; + return daddr; +} + +/** + * net_tx_ip - transmits an IP packet + * @m: the mbuf to transmit + * @proto: the transport protocol + * @daddr: the destination IP address (in native byte order) + * + * The payload must start with the transport (L4) header. The IPv4 (L3) and + * ethernet (L2) headers will be prepended by this function. + * + * @m must have been allocated with net_tx_alloc_mbuf(). + * + * Returns 0 if successful. If successful, the mbuf will be freed when the + * transmit completes. Otherwise, the mbuf still belongs to the caller. + */ +int net_tx_ip(struct mbuf *m, uint8_t proto, uint32_t daddr) +{ + struct eth_addr dhost; + int ret; + + /* prepend the IP header */ + net_push_iphdr(m, proto, daddr); + + /* ask NIC to calculate IP checksum */ + m->txflags |= OLFLAG_IP_CHKSUM | OLFLAG_IPV4; + + /* apply IP routing */ + daddr = net_get_ip_route(daddr); + + /* need to use ARP to resolve dhost */ + ret = arp_lookup(daddr, &dhost, m); + if (unlikely(ret)) { + if (ret == -EINPROGRESS) { + /* ARP code now owns the mbuf */ + return 0; + } else { + /* An unrecoverable error occurred */ + mbuf_pull_hdr(m, struct ip_hdr); + return ret; + } + } + + ret = net_tx_eth(m, ETHTYPE_IP, dhost); + assert(!ret); /* can't fail as implemented so far */ + return 0; +} + +/** + * net_tx_ip_burst - transmits a burst of IP packets + * @ms: an array of mbuf pointers to transmit + * @n: the number of mbufs in @ms + * @proto: the transport protocol + * @daddr: the destination IP address (in native byte order) + * + * The payload must start with the transport (L4) header. The IPv4 (L3) and + * ethernet (L2) headers will be prepended by this function. + * + * @ms must have been allocated with net_tx_alloc_mbuf(). + * + * Returns 0 if successful. If successful, the mbufs will be freed when the + * transmit completes. Otherwise, the mbufs still belongs to the caller. If + * ARP doesn't have a cached entry, only the first mbuf will be transmitted + * when the ARP request resolves. 
+ */ +int net_tx_ip_burst(struct mbuf **ms, int n, uint8_t proto, uint32_t daddr) +{ + struct eth_addr dhost; + int ret, i; + + assert(n > 0); + + /* prepare the mbufs */ + for (i = 0; i < n; i++) { + /* prepend the IP header */ + net_push_iphdr(ms[i], proto, daddr); + + /* ask NIC to calculate IP checksum */ + ms[i]->txflags |= OLFLAG_IP_CHKSUM | OLFLAG_IPV4; + } + + /* apply IP routing */ + daddr = net_get_ip_route(daddr); + + /* use ARP to resolve dhost */ + ret = arp_lookup(daddr, &dhost, ms[0]); + if (unlikely(ret)) { + if (ret == -EINPROGRESS) { + /* ARP code now owns the first mbuf */ + return 0; + } else { + /* An unrecoverable error occurred */ + for (i = 0; i < n; i++) mbuf_pull_hdr(ms[i], struct ip_hdr); + return ret; + } + } + + /* finally, transmit the packets */ + for (i = 0; i < n; i++) { + ret = net_tx_eth(ms[i], ETHTYPE_IP, dhost); + assert(!ret); /* can't fail as implemented so far */ + } + + return 0; +} + +/** + * net_init_percpu - initializes per-CPU state for the network stack + * + * Returns 0 (can't fail). + */ +int net_init_percpu(void) +{ + tcache_init_percpu(net_rx_buf_tcache, &percpu_get(net_rx_buf_percpu)); + tcache_init_percpu(net_tx_buf_tcache, &percpu_get(net_tx_buf_percpu)); + return 0; +} + +static void net_dump_config(void) +{ + char buf[IP_ADDR_STR_LEN]; + + log_info("net: using the following configuration:"); + log_info(" addr:\t%s", ip_addr_to_str(io->addr, buf)); + log_info(" netmask:\t%s", ip_addr_to_str(io->netmask, buf)); + log_info(" gateway:\t%s", ip_addr_to_str(io->gateway, buf)); + log_info(" mac:\t%02X:%02X:%02X:%02X:%02X:%02X", io->mac.addr[0], io->mac.addr[1], + io->mac.addr[2], io->mac.addr[3], io->mac.addr[4], io->mac.addr[5]); +} + +/** + * net_init - initializes the network stack + * + * Returns 0 if successful. + */ +int net_init(void) +{ + int ret; + + ret = slab_create(&net_rx_buf_slab, "SKYLOFT_RX_BUFS", MBUF_DEFAULT_LEN, SLAB_FLAG_LGPAGE); + if (ret) + return ret; + + net_rx_buf_tcache = slab_create_tcache(&net_rx_buf_slab, TCACHE_DEFAULT_MAG_SIZE); + if (!net_rx_buf_tcache) + return -ENOMEM; + + ret = mempool_create(&net_tx_buf_mp, io->tx_region.base, io->tx_region.len, PGSIZE_2MB, + MBUF_DEFAULT_LEN); + if (ret) + return ret; + + net_tx_buf_tcache = + mempool_create_tcache(&net_tx_buf_mp, "SKYLOFT_TX_BUFS", TCACHE_DEFAULT_MAG_SIZE); + if (!net_tx_buf_tcache) + return -ENOMEM; + + log_info("net: started network stack"); + net_dump_config(); + return 0; +} diff --git a/libos/net/dump.c b/libos/net/dump.c new file mode 100644 index 0000000..38d194d --- /dev/null +++ b/libos/net/dump.c @@ -0,0 +1,101 @@ +#include + +#include +#include + +/** + * dump_eth_pkt - prints an ethernet header + * @loglvl: the log level to use + * @hdr: the ethernet header + */ +void dump_eth_pkt(int loglvl, struct eth_hdr *hdr) +{ + struct eth_addr *dmac = &hdr->dhost; + struct eth_addr *smac = &hdr->shost; + + logk(loglvl, "ETHERNET packet dump"); + logk(loglvl, "\tdst MAC: %02X:%02X:%02X:%02X:%02X:%02X", dmac->addr[0], dmac->addr[1], + dmac->addr[2], dmac->addr[3], dmac->addr[4], dmac->addr[5]); + logk(loglvl, "\tsrc MAC: %02X:%02X:%02X:%02X:%02X:%02X", smac->addr[0], smac->addr[1], + smac->addr[2], smac->addr[3], smac->addr[4], smac->addr[5]); + logk(loglvl, "\tframe type: %x", ntoh16(hdr->type)); +} + +/** + * dump_arp_pkt - prints an arp header + * @loglvl: the log level to use + * @arphdr: the arp header + * @ethip: the arp payload (can be NULL) + * + * If @ethip is NULL, then assumes an unsupported htype and/or ptype. 
+ */ +void dump_arp_pkt(int loglvl, struct arp_hdr *arphdr, struct arp_hdr_ethip *ethip) +{ + struct eth_addr *smac = ðip->sender_mac; + struct eth_addr *tmac = ðip->target_mac; + uint16_t op; + uint32_t sip, tip; + + op = ntoh16(arphdr->op); + sip = ntoh32(ethip->sender_ip); + tip = ntoh32(ethip->target_ip); + + logk(loglvl, "ARP packet dump: op %s", (op == ARP_OP_REQUEST) ? "request" : "response"); + + if (!ethip) { + logk(loglvl, "\tunsupported htype %d, ptype %d", ntoh16(arphdr->htype), + ntoh16(arphdr->ptype)); + return; + } + + logk(loglvl, "\tsender MAC:\t%02X:%02X:%02X:%02X:%02X:%02X", smac->addr[0], smac->addr[1], + smac->addr[2], smac->addr[3], smac->addr[4], smac->addr[5]); + logk(loglvl, "\tsender IP:\t%d.%d.%d.%d", ((sip >> 24) & 0xff), ((sip >> 16) & 0xff), + ((sip >> 8) & 0xff), (sip & 0xff)); + logk(loglvl, "\ttarget MAC:\t%02X:%02X:%02X:%02X:%02X:%02X", tmac->addr[0], tmac->addr[1], + tmac->addr[2], tmac->addr[3], tmac->addr[4], tmac->addr[5]); + logk(loglvl, "\ttarget IP:\t%d.%d.%d.%d", ((tip >> 24) & 0xff), ((tip >> 16) & 0xff), + ((tip >> 8) & 0xff), (tip & 0xff)); +} + +void dump_udp_pkt(int loglvl, uint32_t saddr, struct udp_hdr *udp_hdr, void *data) +{ + char sip[IP_ADDR_STR_LEN]; + char line[256]; + uint16_t sport, dport, len; + size_t c, d; + int i, j, k; + + ip_addr_to_str(saddr, sip); + sport = ntoh16(udp_hdr->src_port); + dport = ntoh16(udp_hdr->dst_port); + len = ntoh16(udp_hdr->len) - sizeof(*udp_hdr); + + logk(loglvl, "UDP packet received from %s:%d on port %d", sip, sport, dport); + for (c = 0; c < len;) { + d = snprintf(line, 256, "%016lx: ", c); + for (i = 0; i < 8 && c < len; i++) { + for (j = 0; j < 2 && c < len; j++, c++) { + k = *(((char *)data) + c); + d += snprintf(line + d, 256 - d, "%02x", k); + } + d += snprintf(line + d, 256 - d, " "); + } + line[d] = 0; + logk(loglvl, "%s", line); + } +} + +/** + * ip_addr_to_str - prints an IP address as a human-readable string + * @addr: the ip address + * @str: a buffer to store the string + * + * The buffer must be IP_ADDR_STR_LEN in size. 
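+ *
+ * Typical use, as in net_dump_config() in core.c:
+ *
+ *   char buf[IP_ADDR_STR_LEN];
+ *   log_info("addr: %s", ip_addr_to_str(addr, buf));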
+ */ +char *ip_addr_to_str(uint32_t addr, char *str) +{ + snprintf(str, IP_ADDR_STR_LEN, "%d.%d.%d.%d", ((addr >> 24) & 0xff), ((addr >> 16) & 0xff), + ((addr >> 8) & 0xff), (addr & 0xff)); + return str; +} diff --git a/libos/net/icmp.c b/libos/net/icmp.c new file mode 100644 index 0000000..bb624f2 --- /dev/null +++ b/libos/net/icmp.c @@ -0,0 +1,96 @@ +/* + * icmp.c - support for Internet Control Message Protocol (ICMP) + */ + +#include + +#include +#include +#include + +static void net_rx_icmp_echo(struct mbuf *m_in, const struct icmp_pkt *in_icmp_pkt, + const struct ip_hdr *in_iphdr, uint16_t len) +{ + struct mbuf *m; + struct icmp_hdr *out_icmp_hdr; + + log_debug("icmp: responding to icmp echo request"); + + m = net_tx_alloc_mbuf(); + if (unlikely(!m)) { + mbuf_free(m_in); + return; + } + + /* copy incoming ICMP hdr and data, set type and checksum */ + out_icmp_hdr = (struct icmp_hdr *)mbuf_put(m, len); + memcpy(out_icmp_hdr, in_icmp_pkt, len); + out_icmp_hdr->type = ICMP_ECHOREPLY; + out_icmp_hdr->cksum = 0; + out_icmp_hdr->cksum = cksum_internet((char *)out_icmp_hdr, len); + + /* send the echo reply */ + net_tx_ip_or_free(m, IPPROTO_ICMP, ntoh32(in_iphdr->saddr)); + mbuf_free(m_in); +} + +static void net_rx_icmp_echo_reply(struct mbuf *m, const struct icmp_pkt *icmp_pkt, uint16_t len) +{ + struct ping_payload *payload; + + log_debug("icmp: received icmp echo reply"); + + payload = mbuf_pull_hdr_or_null(m, *payload); + if (unlikely(!payload)) + goto drop; + + net_recv_ping(payload, icmp_pkt); + +drop: + mbuf_free(m); +} + +void net_rx_icmp(struct mbuf *m, const struct ip_hdr *iphdr, uint16_t len) +{ + struct icmp_pkt *icmp_pkt; + + icmp_pkt = (struct icmp_pkt *)mbuf_pull_or_null(m, ICMP_MINLEN); + if (unlikely(!icmp_pkt)) + goto drop; + + switch (icmp_pkt->hdr.type) { + case ICMP_ECHO: + net_rx_icmp_echo(m, icmp_pkt, iphdr, len); + break; + case ICMP_ECHOREPLY: + net_rx_icmp_echo_reply(m, icmp_pkt, len); + break; + default: + log_err("icmp: type %d not yet supported", icmp_pkt->hdr.type); + goto drop; + } + + return; + +drop: + mbuf_free(m); +} + +int net_tx_icmp(struct mbuf *m, uint8_t type, uint8_t code, uint32_t daddr, uint16_t id, + uint16_t seq) +{ + struct icmp_pkt *icmp_pkt; + + log_debug("icmp: sending icmp with type %u, code %u", type, code); + + /* populate ICMP header */ + icmp_pkt = (struct icmp_pkt *)mbuf_push(m, ICMP_MINLEN); + icmp_pkt->hdr.type = ICMP_ECHO; + icmp_pkt->hdr.code = 0; + icmp_pkt->icmp_id = id; + icmp_pkt->icmp_seq = seq; + icmp_pkt->hdr.cksum = 0; + icmp_pkt->hdr.cksum = cksum_internet((char *)icmp_pkt, mbuf_length(m)); + + return net_tx_ip(m, IPPROTO_ICMP, daddr); +} diff --git a/libos/net/mbuf.c b/libos/net/mbuf.c new file mode 100644 index 0000000..cc1318b --- /dev/null +++ b/libos/net/mbuf.c @@ -0,0 +1,29 @@ +/* + * mbuf.c - buffer management for network packets + */ + +#include + +#include + +/** + * mbuf_clone - creates an identical copy of an mbuf + * @dst: the destination mbuf + * @src: the source mbuf + * + * Returns the destination mbuf. 
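+ *
+ * @dst must already be allocated (e.g. with net_tx_alloc_mbuf()) and large
+ * enough to hold @src's headroom and payload; only the payload bytes and the
+ * checksum/txflags metadata are copied.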
+ */ +struct mbuf *mbuf_clone(struct mbuf *dst, struct mbuf *src) +{ + /* copy the backing buffer */ + dst->data = dst->head + mbuf_headroom(src); + memcpy(mbuf_put(dst, mbuf_length(src)), + mbuf_data(src), mbuf_length(src)); + + /* copy packet metadata */ + dst->csum_type = src->csum_type; + dst->csum = src->csum; + dst->txflags = src->txflags; /* NOTE: this is a union */ + + return dst; +} diff --git a/libos/net/ping.c b/libos/net/ping.c new file mode 100644 index 0000000..e0c074e --- /dev/null +++ b/libos/net/ping.c @@ -0,0 +1,68 @@ +/* + * ping.c - simple ping utility + */ + +#include + +#include +#include +#include +#include +#include + +static uint16_t ping_id; + +int net_ping_init(void) +{ + ping_id = rand(); + return 0; +} + +void net_send_ping(uint16_t seq_num, uint32_t daddr) +{ + struct mbuf *m; + struct ping_payload *payload; + + log_debug("ping: sending ping with id %u, seq_num %u to %u", ping_id, seq_num, daddr); + + m = net_tx_alloc_mbuf(); + if (unlikely(!m)) + return; + + /* add send timestamp to payload */ + payload = mbuf_push_hdr(m, struct ping_payload); + gettimeofday(&payload->tx_time, NULL); + + if (unlikely(net_tx_icmp(m, ICMP_ECHO, 0, daddr, ping_id, seq_num) != 0)) + mbuf_free(m); +} + +/* + * Subtract 2 timeval structs: out -= in. Assume out >= in. + */ +static void timeval_subtract(struct timeval *out, const struct timeval *in) +{ + if ((out->tv_usec -= in->tv_usec) < 0) { + --out->tv_sec; + out->tv_usec += 1000000; + } + out->tv_sec -= in->tv_sec; +} + +void net_recv_ping(const struct ping_payload *payload, const struct icmp_pkt *icmp_pkt) +{ + struct timeval tmp_time; + uint32_t latency_us; + + if (icmp_pkt->icmp_id != ping_id) { + /* this ICMP pkt is not for us */ + return; + } + + /* determine latency */ + gettimeofday(&tmp_time, NULL); + timeval_subtract(&tmp_time, &payload->tx_time); + latency_us = tmp_time.tv_sec * 1000000 + tmp_time.tv_usec; + + log_debug("ping: received ping with seq_num %u, latency %u us", icmp_pkt->icmp_seq, latency_us); +} diff --git a/libos/net/tcp.c b/libos/net/tcp.c new file mode 100644 index 0000000..cc00dfb --- /dev/null +++ b/libos/net/tcp.c @@ -0,0 +1,1115 @@ +/* + * tcp.c - support for Transmission Control Protocol (RFC 793) + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tcp.h" + +/* protects @tcp_conns */ +static DEFINE_SPINLOCK(tcp_lock); +/* a list of all TCP connections */ +static DEFINE_LIST_HEAD(tcp_conns); + +static void tcp_retransmit(void *arg); + +void tcp_timer_update(tcp_conn_t *c) +{ + uint64_t next_timeout = -1L; + struct mbuf *m; + assert_spin_lock_held(&c->lock); + + if (unlikely(c->pcb.state == TCP_STATE_TIME_WAIT)) + next_timeout = c->time_wait_ts + TCP_TIME_WAIT_TIMEOUT; + + if (c->ack_delayed) + next_timeout = MIN(next_timeout, c->ack_ts + TCP_ACK_TIMEOUT); + + if (!c->tx_exclusive) { + m = list_top(&c->txq, struct mbuf, link); + if (m) + next_timeout = MIN(next_timeout, m->timestamp + TCP_RETRANSMIT_TIMEOUT); + } + + if (!list_empty(&c->rxq_ooo)) + next_timeout = MIN(next_timeout, now_us() + TCP_OOQ_ACK_TIMEOUT); + + atomic_store_rel(&c->next_timeout, next_timeout); +} + +/* check for timeouts in a TCP connection */ +static void tcp_handle_timeouts(tcp_conn_t *c, uint64_t now) +{ + bool do_ack = false, do_retransmit = false; + + spin_lock_np(&c->lock); + if (c->pcb.state == TCP_STATE_CLOSED) { + spin_unlock_np(&c->lock); + return; + } + + if (c->pcb.state == TCP_STATE_TIME_WAIT && now - c->time_wait_ts >= TCP_TIME_WAIT_TIMEOUT) { + 
log_debug("tcp: %p time wait timeout", c); + tcp_conn_set_state(c, TCP_STATE_CLOSED); + spin_unlock_np(&c->lock); + tcp_conn_put(c); + return; + } + if (c->ack_delayed && now - c->ack_ts >= TCP_ACK_TIMEOUT) { + log_debug("tcp: %p delayed ack timeout", c); + c->ack_delayed = false; + do_ack = true; + } + if (!c->tx_exclusive && !list_empty(&c->txq)) { + struct mbuf *m = list_top(&c->txq, struct mbuf, link); + if (now - m->timestamp >= TCP_RETRANSMIT_TIMEOUT) { + log_debug("tcp: %p retransmission timeout", c); + /* It is safe to take a reference, since state != closed */ + tcp_conn_get(c); + do_retransmit = true; + } + } + + do_ack |= !list_empty(&c->rxq_ooo); + + tcp_timer_update(c); + + spin_unlock_np(&c->lock); + + if (do_ack) + tcp_tx_ack(c); + if (do_retransmit) + sl_task_spawn(tcp_retransmit, c, 0); +} + +/* a periodic background thread that handles timeout events */ +static void tcp_worker(void *arg) +{ + tcp_conn_t *c; + uint64_t now; + + while (true) { + now = now_us(); + + spin_lock_np(&tcp_lock); + list_for_each(&tcp_conns, c, global_link) + { + if (atomic_load_acq(&c->next_timeout) <= now) + tcp_handle_timeouts(c, now); + } + spin_unlock_np(&tcp_lock); + + timer_sleep(10 * USEC_PER_MSEC); + } +} + +/** + * tcp_conn_ack - removes acknowledged packets from TX queue + * @c: the TCP connection to update + * @freeq: a pointer to a list to store acknowledged buffers to later free + * + * WARNING: the caller must hold @c->lock. + * @freeq is provided so that @c->lock can be released before freeing buffers. + */ +void tcp_conn_ack(tcp_conn_t *c, struct list_head *freeq) +{ + struct mbuf *m; + + assert_spin_lock_held(&c->lock); + + /* will free these segments later */ + if (c->tx_exclusive) + return; + + /* dequeue buffers that are fully acknowledged */ + while (true) { + m = list_top(&c->txq, struct mbuf, link); + if (!m) + break; + if (wraps_gt(m->seg_end, c->pcb.snd_una)) + break; + + list_pop(&c->txq, struct mbuf, link); + list_add_tail(freeq, &m->link); + } +} + +/** + * tcp_conn_set_state - changes the TCP PCB state + * @c: the TCP connection to update + * @new_state: the new TCP_STATE_* value + * + * WARNING: @c->lock must be held by the caller. + * WARNING: @new_state must be greater than the current state. + */ +void tcp_conn_set_state(tcp_conn_t *c, int new_state) +{ + assert_spin_lock_held(&c->lock); + assert(c->pcb.state == TCP_STATE_CLOSED || c->pcb.state < new_state); + + /* unblock any threads waiting for the connection to be established */ + if (c->pcb.state < TCP_STATE_ESTABLISHED && new_state >= TCP_STATE_ESTABLISHED) { + waitq_release(&c->tx_wq); + } + + tcp_debug_state_change(c, c->pcb.state, new_state); + c->pcb.state = new_state; + tcp_timer_update(c); +} + +/* handles network errors for TCP sockets */ +static void tcp_conn_err(struct trans_entry *e, int err) +{ + tcp_conn_t *c = container_of(e, tcp_conn_t, e); + + spin_lock_np(&c->lock); + tcp_conn_fail(c, err); + spin_unlock_np(&c->lock); +} + +/* operations for TCP sockets */ +const struct trans_ops tcp_conn_ops = { + .recv = tcp_rx_conn, + .err = tcp_conn_err, +}; + +/* + * Connection initialization + */ + +/** + * tcp_conn_alloc - allocates a TCP connection struct + * + * Returns a connection, or NULL if out of memory. 
+ */ +tcp_conn_t *tcp_conn_alloc(void) +{ + tcp_conn_t *c; + + c = smalloc(sizeof(*c)); + if (!c) + return NULL; + + /* general fields */ + memset(&c->pcb, 0, sizeof(c->pcb)); + spin_lock_init(&c->lock); + kref_init(&c->ref); + c->err = 0; + + /* ingress fields */ + c->rx_closed = false; + c->rx_exclusive = false; + waitq_init(&c->rx_wq); + list_head_init(&c->rxq_ooo); + list_head_init(&c->rxq); + + /* egress fields */ + c->tx_closed = false; + c->tx_exclusive = false; + waitq_init(&c->tx_wq); + c->tx_last_ack = 0; + c->tx_last_win = 0; + c->tx_pending = NULL; + list_head_init(&c->txq); + c->do_fast_retransmit = false; + + /* timeouts */ + c->next_timeout = -1L; + c->ack_delayed = false; + c->rcv_wnd_full = false; + c->ack_ts = 0; + c->time_wait_ts = 0; + c->rep_acks = 0; + + /* initialize egress half of PCB */ + c->pcb.state = TCP_STATE_CLOSED; + c->pcb.iss = rand_crc32c(0x12345678); /* TODO: not enough */ + c->pcb.snd_nxt = c->pcb.iss; + c->pcb.snd_una = c->pcb.iss; + c->pcb.rcv_wnd = TCP_WIN; + + return c; +} + +/** + * tcp_conn_attach - attaches a connection to the transport layer + * @c: the connection to attach + * @laddr: the local network address + * @raddr: the remote network address + * + * After calling this function, if successful, ingress packets and errors will + * be delivered. + */ +int tcp_conn_attach(tcp_conn_t *c, struct netaddr laddr, struct netaddr raddr) +{ + int ret; + + if (laddr.ip == 0) + laddr.ip = io->addr; + else if (laddr.ip != io->addr) + return -EINVAL; + + trans_init_5tuple(&c->e, IPPROTO_TCP, &tcp_conn_ops, laddr, raddr); + if (laddr.port == 0) + ret = trans_table_add_with_ephemeral_port(&c->e); + else + ret = trans_table_add(&c->e); + if (ret) + return ret; + + static bool worker_running; + bool start_worker = false; + spin_lock_np(&tcp_lock); + if (unlikely(!worker_running)) + worker_running = start_worker = true; + list_add_tail(&tcp_conns, &c->global_link); + spin_unlock_np(&tcp_lock); + + if (start_worker) + BUG_ON(sl_task_spawn(tcp_worker, NULL, 0)); + + return 0; +} + +static void tcp_conn_release(struct rcu_head *h) +{ + tcp_conn_t *c = container_of(h, tcp_conn_t, e.rcu); + + spin_lock_np(&tcp_lock); + list_del_from(&tcp_conns, &c->global_link); + spin_unlock_np(&tcp_lock); + + if (c->tx_pending) + mbuf_free(c->tx_pending); + mbuf_list_free(&c->rxq_ooo); + mbuf_list_free(&c->rxq); + mbuf_list_free(&c->txq); + sfree(c); +} + +/** + * tcp_conn_destroy - tears down a frees a TCP connection + * @c: the connection to destroy + */ +void tcp_conn_destroy(tcp_conn_t *c) +{ + trans_table_remove(&c->e); + rcu_free(&c->e.rcu, tcp_conn_release); +} + +/** + * tcp_conn_release_ref - a helper to free the conn when ref reaches zero + * @r: the embedded reference count structure + */ +void tcp_conn_release_ref(struct kref *r) +{ + tcp_conn_t *c = container_of(r, tcp_conn_t, ref); + + BUG_ON(c->pcb.state != TCP_STATE_CLOSED); + tcp_conn_destroy(c); +} + +/* + * Support for accepting new connections + */ + +struct tcp_queue { + struct trans_entry e; + spinlock_t l; + waitq_t wq; + struct list_head conns; + int backlog; + bool shutdown; +}; + +static void tcp_queue_recv(struct trans_entry *e, struct mbuf *m) +{ + tcp_queue_t *q = container_of(e, tcp_queue_t, e); + tcp_conn_t *c; + struct task *t; + + /* make sure the connection queue isn't full */ + spin_lock_np(&q->l); + if (unlikely(q->backlog == 0 || q->shutdown)) { + spin_unlock_np(&q->l); + goto done; + } + q->backlog--; + spin_unlock_np(&q->l); + + /* create a new connection */ + c = tcp_rx_listener(e->laddr, 
m); + if (!c) { + spin_lock_np(&q->l); + q->backlog++; + spin_unlock_np(&q->l); + goto done; + } + + /* wake a thread to accept the connection */ + spin_lock_np(&q->l); + list_add_tail(&q->conns, &c->queue_link); + t = waitq_signal(&q->wq, &q->l); + spin_unlock_np(&q->l); + waitq_signal_finish(t); + +done: + mbuf_free(m); +} + +/* operations for TCP listen queues */ +const struct trans_ops tcp_queue_ops = { + .recv = tcp_queue_recv, +}; + +/** + * tcp_listen - creates a TCP listening queue for a local address + * @laddr: the local address to listen on + * @backlog: the maximum number of unaccepted sockets to queue + * @q_out: a pointer to store the newly created listening queue + * + * Returns 0 if successful, otherwise fails. + */ +int tcp_listen(struct netaddr laddr, int backlog, tcp_queue_t **q_out) +{ + tcp_queue_t *q; + int ret; + + if (backlog < 1) + return -EINVAL; + + /* only can support one local IP so far */ + if (laddr.ip == 0) + laddr.ip = io->addr; + else if (laddr.ip != io->addr) + return -EINVAL; + + q = smalloc(sizeof(*q)); + if (!q) + return -ENOMEM; + + trans_init_3tuple(&q->e, IPPROTO_TCP, &tcp_queue_ops, laddr); + spin_lock_init(&q->l); + waitq_init(&q->wq); + list_head_init(&q->conns); + q->backlog = backlog; + q->shutdown = false; + + ret = trans_table_add(&q->e); + if (ret) { + sfree(q); + return ret; + } + + *q_out = q; + return 0; +} + +/** + * tcp_accept - accepts a TCP connection + * @q: the listen queue to accept the connection on + * @c_out: a pointer to store the connection + * + * Returns 0 if successful, otherwise -EPIPE if the listen queue was closed. + */ +int tcp_accept(tcp_queue_t *q, tcp_conn_t **c_out) +{ + tcp_conn_t *c; + + spin_lock_np(&q->l); + while (list_empty(&q->conns) && !q->shutdown) waitq_wait(&q->wq, &q->l); + + /* was the queue drained and shutdown? */ + if (list_empty(&q->conns) && q->shutdown) { + spin_unlock_np(&q->l); + return -EPIPE; + } + + /* otherwise a new connection is available */ + q->backlog++; + c = list_pop(&q->conns, tcp_conn_t, queue_link); + assert(c != NULL); + spin_unlock_np(&q->l); + + *c_out = c; + return 0; +} + +static void __tcp_qshutdown(tcp_queue_t *q) +{ + /* mark the listen queue as shutdown */ + spin_lock_np(&q->l); + BUG_ON(q->shutdown); + q->shutdown = true; + spin_unlock_np(&q->l); + + /* prevent ingress receive and error dispatch (after RCU period) */ + trans_table_remove(&q->e); +} + +/** + * tcp_qshutdown - disables a TCP listener queue + * @q: the TCP listener queue to disable + * + * All blocking requests on the queue will return -EPIPE. + */ +void tcp_qshutdown(tcp_queue_t *q) +{ + /* shutdown the listen queue */ + __tcp_qshutdown(q); + + /* wake up all pending threads */ + waitq_release(&q->wq); +} + +static void tcp_queue_release(struct rcu_head *h) +{ + tcp_queue_t *q = container_of(h, tcp_queue_t, e.rcu); + sfree(q); +} + +/** + * tcp_qclose - frees a TCP listener queue + * @q: the TCP listener queue to close + * + * WARNING: Only the last reference can safely call this method. Call + * tcp_qshutdown() first if any threads are sleeping on the queue. 
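+ * + * Typical teardown order (illustrative sketch based on the warnings above): + * first wake any tasks blocked in tcp_accept(), then free the queue: + * + *	tcp_qshutdown(q); + *	tcp_qclose(q);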
+ */ +void tcp_qclose(tcp_queue_t *q) +{ + tcp_conn_t *c, *nextc; + + if (!q->shutdown) + __tcp_qshutdown(q); + + BUG_ON(!waitq_empty(&q->wq)); + + /* free all pending connections */ + list_for_each_safe(&q->conns, c, nextc, queue_link) + { + list_del_from(&q->conns, &c->queue_link); + tcp_conn_destroy(c); + } + + rcu_free(&q->e.rcu, tcp_queue_release); +} + +/* + * Support for the TCP socket API + */ + +/** + * tcp_dial - opens a TCP connection, creating a new socket + * @laddr: the local address + * @raddr: the remote address + * @c_out: a pointer to store the new connection + * + * Returns 0 if successful, otherwise fail. + */ +int tcp_dial(struct netaddr laddr, struct netaddr raddr, tcp_conn_t **c_out) +{ + tcp_conn_t *c; + int ret; + + /* create and initialize a connection */ + c = tcp_conn_alloc(); + if (unlikely(!c)) + return -ENOMEM; + + /* + * Attach the connection to the transport layer. From this point onward + * ingress packets can be dispatched to the connection. + */ + ret = tcp_conn_attach(c, laddr, raddr); + if (unlikely(ret)) { + sfree(c); + return ret; + } + + /* send a SYN to the remote host */ + spin_lock_np(&c->lock); + ret = tcp_tx_ctl(c, TCP_SYN); + if (unlikely(ret)) { + spin_unlock_np(&c->lock); + tcp_conn_destroy(c); + return ret; + } + tcp_conn_get(c); /* take a ref for the state machine */ + tcp_conn_set_state(c, TCP_STATE_SYN_SENT); + + /* wait until the connection is established or there is a failure */ + while (!c->tx_closed && c->pcb.state < TCP_STATE_ESTABLISHED) waitq_wait(&c->tx_wq, &c->lock); + + /* check if the connection failed */ + if (c->tx_closed) { + ret = -c->err; + spin_unlock_np(&c->lock); + tcp_conn_destroy(c); + return ret; + } + spin_unlock_np(&c->lock); + + *c_out = c; + return 0; +} + +/** + * tcp_local_addr - gets the local address of a TCP connection + * @c: the TCP connection + */ +struct netaddr tcp_local_addr(tcp_conn_t *c) +{ + return c->e.laddr; +} + +/** + * tcp_remote_addr - gets the remote address of a TCP connection + * @c: the TCP connection + */ +struct netaddr tcp_remote_addr(tcp_conn_t *c) +{ + return c->e.raddr; +} + +static ssize_t tcp_read_wait(tcp_conn_t *c, size_t len, struct list_head *q, struct mbuf **mout) +{ + struct mbuf *m; + size_t readlen = 0; + + *mout = NULL; + spin_lock_np(&c->lock); + + /* block until there is an actionable event */ + while (!c->rx_closed && (c->rx_exclusive || list_empty(&c->rxq))) + waitq_wait(&c->rx_wq, &c->lock); + + /* is the socket closed? 
*/ + if (c->rx_closed) { + spin_unlock_np(&c->lock); + return -c->err; + } + + /* pop off the mbufs that will be read */ + while (readlen < len) { + m = list_top(&c->rxq, struct mbuf, link); + if (!m) + break; + + if (unlikely((m->flags & TCP_FIN) > 0)) { + tcp_conn_shutdown_rx(c); + if (mbuf_length(m) == 0) + break; + } + + if (len - readlen < mbuf_length(m)) { + c->rx_exclusive = true; + *mout = m; + readlen = len; + break; + } + + list_del_from(&c->rxq, &m->link); + list_add_tail(q, &m->link); + readlen += mbuf_length(m); + } + + c->pcb.rcv_wnd += readlen; + if (unlikely(c->rcv_wnd_full && c->pcb.rcv_wnd >= TCP_WIN / 4)) { + tcp_tx_ack(c); + c->rcv_wnd_full = false; + } + spin_unlock_np(&c->lock); + + return readlen; +} + +static void tcp_read_finish(tcp_conn_t *c, struct mbuf *m) +{ + struct list_head waiters; + + if (!m) + return; + + list_head_init(&waiters); + spin_lock_np(&c->lock); + c->rx_exclusive = false; + waitq_release_start(&c->rx_wq, &waiters); + spin_unlock_np(&c->lock); + waitq_release_finish(&waiters); +} + +/** + * tcp_read - reads data from a TCP connection + * @c: the TCP connection + * @buf: a buffer to store the read data + * @len: the length of @buf + * + * Returns the number of bytes read, 0 if the connection is closed, or < 0 + * if an error occurred. + */ +ssize_t tcp_read(tcp_conn_t *c, void *buf, size_t len) +{ + char *pos = buf; + struct list_head q; + struct mbuf *m; + ssize_t ret; + + list_head_init(&q); + + /* wait for data to become available */ + ret = tcp_read_wait(c, len, &q, &m); + + /* check if connection was closed */ + if (ret <= 0) + return ret; + + /* copy the data from the buffers */ + while (true) { + struct mbuf *cur = list_pop(&q, struct mbuf, link); + if (!cur) + break; + + memcpy(pos, mbuf_data(cur), mbuf_length(cur)); + pos += mbuf_length(cur); + mbuf_free(cur); + } + + /* we may have to consume only part of a buffer */ + if (m) { + size_t cpylen = len - (uintptr_t)pos + (uintptr_t)buf; + memcpy(pos, mbuf_pull(m, cpylen), cpylen); + m->seg_seq += cpylen; + } + + /* wakeup any pending readers */ + tcp_read_finish(c, m); + + return ret; +} + +static size_t iov_len(const struct iovec *iov, int iovcnt) +{ + size_t len = 0; + int i; + + for (i = 0; i < iovcnt; i++) len += iov[i].iov_len; + + return len; +} + +/** + * tcp_readv - reads vectored data from a TCP connection + * @c: the TCP connection + * @iov: a pointer to the IO vector + * @iovcnt: the number of vectors in @iov + * + * Returns the number of bytes read, 0 if the connection is closed, or < 0 + * if an error occurred.
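+ * + * Example (illustrative sketch; @c is an established connection): + * + *	char hdr[16], body[1024]; + *	struct iovec iov[2] = { + *		{ .iov_base = hdr, .iov_len = sizeof(hdr) }, + *		{ .iov_base = body, .iov_len = sizeof(body) }, + *	}; + *	ssize_t n = tcp_readv(c, iov, 2);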
+ */ +ssize_t tcp_readv(tcp_conn_t *c, const struct iovec *iov, int iovcnt) +{ + struct list_head q; + struct mbuf *m; + ssize_t len = iov_len(iov, iovcnt); + size_t offset = 0; + int i = 0; + + list_head_init(&q); + + /* wait for data to become available */ + len = tcp_read_wait(c, len, &q, &m); + + /* check if connection was closed */ + if (len <= 0) + return len; + + /* copy the data from the buffers */ + while (true) { + struct mbuf *cur = list_pop(&q, struct mbuf, link); + if (!cur) + break; + + do { + const struct iovec *vp = &iov[i]; + size_t cpylen = MIN(vp->iov_len - offset, mbuf_length(m)); + + memcpy((char *)vp->iov_base + offset, mbuf_pull(m, cpylen), cpylen); + + offset += cpylen; + if (offset == vp->iov_len) { + offset = 0; + i++; + } + + assert(i <= iovcnt); + } while (mbuf_length(m) > 0); + mbuf_free(cur); + } + + /* we may have to consume only part of a buffer */ + if (m) { + do { + const struct iovec *vp = &iov[i]; + size_t cpylen = MIN(vp->iov_len - offset, mbuf_length(m)); + + memcpy((char *)vp->iov_base + offset, mbuf_pull(m, cpylen), cpylen); + m->seg_seq += cpylen; + offset += cpylen; + if (offset == vp->iov_len) { + offset = 0; + i++; + } + + assert(mbuf_length(m) > 0); + } while (i < iovcnt); + } + + /* wakeup any pending readers */ + tcp_read_finish(c, m); + + return len; +} + +static int tcp_write_wait(tcp_conn_t *c, size_t *winlen) +{ + spin_lock_np(&c->lock); + + /* block until there is an actionable event */ + while (!c->tx_closed && (c->pcb.state < TCP_STATE_ESTABLISHED || c->tx_exclusive || + wraps_lte(c->pcb.snd_una + c->pcb.snd_wnd, c->pcb.snd_nxt))) { + waitq_wait(&c->tx_wq, &c->lock); + } + + /* is the socket closed? */ + if (c->tx_closed) { + spin_unlock_np(&c->lock); + return c->err ? -c->err : -EPIPE; + } + + /* drop the lock to allow concurrent RX processing */ + c->tx_exclusive = true; + /* TODO: must allow at least one byte to avoid zero window deadlock */ + *winlen = c->pcb.snd_una + c->pcb.snd_wnd - c->pcb.snd_nxt; + spin_unlock_np(&c->lock); + + return 0; +} + +static void tcp_write_finish(tcp_conn_t *c) +{ + struct list_head q; + struct list_head waiters; + struct mbuf *retransmit = NULL; + + assert(c->tx_exclusive == true); + list_head_init(&q); + list_head_init(&waiters); + + spin_lock_np(&c->lock); + c->tx_exclusive = false; + tcp_conn_ack(c, &q); + if (c->pcb.rcv_nxt == c->tx_last_ack) /* race condition check */ + c->ack_delayed = false; + else + c->ack_ts = now_us(); + if (c->pcb.state == TCP_STATE_CLOSED) { + list_append_list(&q, &c->txq); + if (c->tx_pending) { + list_add_tail(&q, &c->tx_pending->link); + c->tx_pending = NULL; + } + } else if (c->do_fast_retransmit) { + c->do_fast_retransmit = false; + if (c->fast_retransmit_last_ack == c->pcb.snd_una) + retransmit = tcp_tx_fast_retransmit_start(c); + } + + tcp_timer_update(c); + waitq_release_start(&c->tx_wq, &waiters); + spin_unlock_np(&c->lock); + + tcp_tx_fast_retransmit_finish(c, retransmit); + waitq_release_finish(&waiters); + mbuf_list_free(&q); +} + +/** + * tcp_write - writes data to a TCP connection + * @c: the TCP connection + * @buf: a buffer from which to copy the data + * @len: the length of the data + * + * Returns the number of bytes written (could be less than @len), or < 0 + * if there was a failure. 
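+ * + * Because the return value may be shorter than @len, callers that must send a + * whole buffer typically loop (illustrative sketch): + * + *	size_t off = 0; + *	while (off < len) { + *		ssize_t n = tcp_write(c, (const char *)buf + off, len - off); + *		if (n < 0) + *			return n; + *		off += n; + *	}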
+ */ +ssize_t tcp_write(tcp_conn_t *c, const void *buf, size_t len) +{ + size_t winlen; + ssize_t ret; + + /* block until the data can be sent */ + ret = tcp_write_wait(c, &winlen); + if (ret) + return ret; + + /* actually send the data */ + ret = tcp_tx_send(c, buf, MIN(len, winlen), len <= winlen); + + /* catch up on any pending work */ + tcp_write_finish(c); + + return ret; +} + +/** + * tcp_writev - writes vectored data to a TCP connection + * @c: the TCP connection + * @iov: a pointer to the IO vector + * @iovcnt: the number of vectors in @iov + * + * Returns the number of bytes written (could be less than requested), or < 0 + * if there was a failure. + */ +ssize_t tcp_writev(tcp_conn_t *c, const struct iovec *iov, int iovcnt) +{ + size_t winlen; + ssize_t sent = 0, ret; + int i; + + /* block until the data can be sent */ + ret = tcp_write_wait(c, &winlen); + if (ret) + return ret; + + /* actually send the data */ + for (i = 0; i < iovcnt; i++, iov++) { + if (winlen <= 0) + break; + ret = tcp_tx_send(c, iov->iov_base, MIN(iov->iov_len, winlen), + i == iovcnt - 1 && iov->iov_len <= winlen); + if (ret <= 0) + break; + winlen -= ret; + sent += ret; + } + + /* catch up on any pending work */ + tcp_write_finish(c); + + return sent > 0 ? sent : ret; +} + +/* resend any pending egress packets that timed out */ +static void tcp_retransmit(void *arg) +{ + tcp_conn_t *c = (tcp_conn_t *)arg; + + spin_lock_np(&c->lock); + + while (c->tx_exclusive && c->pcb.state != TCP_STATE_CLOSED) waitq_wait(&c->tx_wq, &c->lock); + + if (c->pcb.state != TCP_STATE_CLOSED) { + c->tx_exclusive = true; + spin_unlock_np(&c->lock); + tcp_tx_retransmit(c); + tcp_write_finish(c); + } else { + spin_unlock_np(&c->lock); + } + + tcp_conn_put(c); +} + +/** + * tcp_conn_fail - closes a TCP both sides of a connection with an error + * @c: the TCP connection to shutdown + * @err: the error code (failure reason for the close) + * + * The caller must hold @c's lock. + */ +void tcp_conn_fail(tcp_conn_t *c, int err) +{ + assert_spin_lock_held(&c->lock); + + c->err = err; + tcp_conn_set_state(c, TCP_STATE_CLOSED); + + if (!c->rx_closed) { + c->rx_closed = true; + waitq_release(&c->rx_wq); + } + + if (!c->tx_closed) { + c->tx_closed = true; + waitq_release(&c->tx_wq); + } + + /* will be freed by the writer if one is busy */ + if (!c->tx_exclusive) { + mbuf_list_free(&c->txq); + if (c->tx_pending) { + mbuf_free(c->tx_pending); + c->tx_pending = NULL; + } + } + if (!c->rx_exclusive) + mbuf_list_free(&c->rxq); + mbuf_list_free(&c->rxq_ooo); + + /* state machine is disabled, drop ref */ + tcp_conn_put(c); +} + +/** + * tcp_conn_shutdown_rx - closes ingress for a TCP connection + * @c: the TCP connection to shutdown + * + * The caller must hold @c's lock. 
+ */ +void tcp_conn_shutdown_rx(tcp_conn_t *c) +{ + assert_spin_lock_held(&c->lock); + + if (c->rx_closed) + return; + + c->rx_closed = true; + waitq_release(&c->rx_wq); +} + +static int tcp_conn_shutdown_tx(tcp_conn_t *c) +{ + int ret; + + assert_spin_lock_held(&c->lock); + + if (c->tx_closed) + return 0; + + assert(c->pcb.state >= TCP_STATE_ESTABLISHED); + while (c->tx_exclusive) waitq_wait(&c->tx_wq, &c->lock); + ret = tcp_tx_ctl(c, TCP_FIN | TCP_ACK); + if (unlikely(ret)) + return ret; + if (c->pcb.state == TCP_STATE_ESTABLISHED) + tcp_conn_set_state(c, TCP_STATE_FIN_WAIT1); + else if (c->pcb.state == TCP_STATE_CLOSE_WAIT) + tcp_conn_set_state(c, TCP_STATE_LAST_ACK); + else + WARN(); + + c->tx_closed = true; + waitq_release(&c->tx_wq); + + return 0; +} + +/** + * tcp_shutdown - shuts a TCP connection down + * @c: the TCP connection to shutdown + * @how: the directions to shutdown (SHUT_RD, SHUT_WR, or SHUT_RDWR) + * + * Returns 0 if successful, otherwise < 0 for failure. + */ +int tcp_shutdown(tcp_conn_t *c, int how) +{ + bool tx, rx; + int ret; + + if (how != SHUT_RD && how != SHUT_WR && how != SHUT_RDWR) + return -EINVAL; + + tx = how == SHUT_WR || how == SHUT_RDWR; + rx = how == SHUT_RD || how == SHUT_RDWR; + + spin_lock_np(&c->lock); + if (tx) { + ret = tcp_conn_shutdown_tx(c); + if (ret) { + spin_unlock_np(&c->lock); + return ret; + } + } + if (rx) + tcp_conn_shutdown_rx(c); + spin_unlock_np(&c->lock); + + return 0; +} + +/** + * tcp_abort - force an immediate (ungraceful) close of the connection + * @c: the TCP connection to abort + */ +void tcp_abort(tcp_conn_t *c) +{ + int i; + uint32_t snd_nxt; + struct netaddr l, r; + + spin_lock_np(&c->lock); + if (c->pcb.state == TCP_STATE_CLOSED) { + spin_unlock_np(&c->lock); + return; + } + + l = c->e.laddr; + r = c->e.raddr; + tcp_conn_fail(c, ECONNABORTED); + + while (c->tx_exclusive) waitq_wait(&c->tx_wq, &c->lock); + + snd_nxt = c->pcb.snd_nxt; + spin_unlock_np(&c->lock); + + for (i = 0; i < 10; i++) { + if (tcp_tx_raw_rst(l, r, snd_nxt) == 0) + return; + timer_sleep(10); + } + + log_warn("tcp: failed to transmit TCP_RST"); +} + +/** + * tcp_close - frees a TCP connection + * @c: the TCP connection to free + * + * WARNING: Only the last reference can safely call this method. Call + * tcp_shutdown() or tcp_abort() first if any threads are sleeping on the + * socket. + */ +void tcp_close(tcp_conn_t *c) +{ + int ret; + + spin_lock_np(&c->lock); + BUG_ON(!waitq_empty(&c->rx_wq)); + ret = tcp_conn_shutdown_tx(c); + if (ret) + tcp_conn_fail(c, -ret); + tcp_conn_shutdown_rx(c); + spin_unlock_np(&c->lock); + + tcp_conn_put(c); +} + +/** + * tcp_init_late - starts the TCP worker thread + * + * Returns 0 if successful. 
+ */ +int tcp_init_late(void) +{ + return 0; +} diff --git a/libos/net/tcp.h b/libos/net/tcp.h new file mode 100644 index 0000000..7616715 --- /dev/null +++ b/libos/net/tcp.h @@ -0,0 +1,184 @@ +/* + * tcp.h - local header for TCP support + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include "waitq.h" + +/* adjustable constants */ +#define TCP_MSS (ETH_MTU - sizeof(struct ip_hdr) - sizeof(struct tcp_hdr)) +#define TCP_WIN ((65535 / TCP_MSS) * TCP_MSS) +#define TCP_ACK_TIMEOUT (10 * USEC_PER_MSEC) +#define TCP_OOQ_ACK_TIMEOUT (300 * USEC_PER_MSEC) +#define TCP_TIME_WAIT_TIMEOUT (1 * USEC_PER_SEC) /* FIXME: should be 8 minutes */ +#define TCP_RETRANSMIT_TIMEOUT (300 * USEC_PER_MSEC) /* FIXME: should be dynamic */ +#define TCP_FAST_RETRANSMIT_THRESH 3 +#define TCP_OOO_MAX_SIZE 2048 +#define TCP_RETRANSMIT_BATCH 16 + +/* connecion states (RFC 793 Section 3.2) */ +enum { + TCP_STATE_SYN_SENT = 0, + TCP_STATE_SYN_RECEIVED, + TCP_STATE_ESTABLISHED, + TCP_STATE_FIN_WAIT1, + TCP_STATE_FIN_WAIT2, + TCP_STATE_CLOSE_WAIT, + TCP_STATE_CLOSING, + TCP_STATE_LAST_ACK, + TCP_STATE_TIME_WAIT, + TCP_STATE_CLOSED, +}; + +/* TCP protocol control block (PCB) */ +struct tcp_pcb { + int state; /* the connection state */ + + /* send sequence variables (RFC 793 Section 3.2) */ + uint32_t snd_una; /* send unacknowledged */ + uint32_t snd_nxt; /* send next */ + uint32_t snd_wnd; /* send window */ + uint32_t snd_up; /* send urgent pointer */ + uint32_t snd_wl1; /* last window update - seq number */ + uint32_t snd_wl2; /* last window update - ack number */ + uint32_t iss; /* initial send sequence number */ + + /* receive sequence variables (RFC 793 Section 3.2) */ + union { + struct { + uint32_t rcv_nxt; /* receive next */ + uint32_t rcv_wnd; /* receive window */ + }; + uint64_t rcv_nxt_wnd; + }; + uint32_t rcv_up; /* receive urgent pointer */ + uint32_t irs; /* initial receive sequence number */ +}; + +/* the TCP connection struct */ +struct tcp_conn { + struct trans_entry e; + struct tcp_pcb pcb; + struct list_node global_link; + struct list_node queue_link; + spinlock_t lock; + struct kref ref; + int err; /* error code for read(), write(), etc. */ + + /* ingress path */ + unsigned int rx_closed : 1; + unsigned int rx_exclusive : 1; + waitq_t rx_wq; + struct list_head rxq_ooo; + struct list_head rxq; + + /* egress path */ + unsigned int tx_closed : 1; + unsigned int tx_exclusive : 1; + waitq_t tx_wq; + uint32_t tx_last_ack; + uint16_t tx_last_win; + struct mbuf *tx_pending; + struct list_head txq; + bool do_fast_retransmit; + uint32_t fast_retransmit_last_ack; + + /* timeouts */ + uint64_t next_timeout; + bool ack_delayed; + bool rcv_wnd_full; + uint64_t ack_ts; + uint64_t time_wait_ts; + int rep_acks; +}; +tcp_conn_t *tcp_conn_alloc(void); +int tcp_conn_attach(tcp_conn_t *c, struct netaddr laddr, struct netaddr raddr); +void tcp_conn_ack(tcp_conn_t *c, struct list_head *freeq); +void tcp_conn_set_state(tcp_conn_t *c, int new_state); +void tcp_conn_fail(tcp_conn_t *c, int err); +void tcp_conn_shutdown_rx(tcp_conn_t *c); +void tcp_conn_destroy(tcp_conn_t *c); + +void tcp_timer_update(tcp_conn_t *c); + +/** + * tcp_conn_get - increments the connection ref count + * @c: the connection to increment + * + * Returns @c. 
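+ * + * Pairing note: every tcp_conn_get() must be balanced by a tcp_conn_put(). For + * example, the timeout path takes a reference before spawning tcp_retransmit(), + * and the spawned task drops it when it finishes.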
+ */ +static inline tcp_conn_t *tcp_conn_get(tcp_conn_t *c) +{ + kref_get(&c->ref); + return c; +} + +void tcp_conn_release_ref(struct kref *r); + +/** + * tcp_conn_put - decrements the connection ref count + * @c: the connection to decrement + */ +static inline void tcp_conn_put(tcp_conn_t *c) +{ + kref_put(&c->ref, tcp_conn_release_ref); +} + +/* + * ingress path + */ + +void tcp_rx_conn(struct trans_entry *e, struct mbuf *m); +tcp_conn_t *tcp_rx_listener(struct netaddr laddr, struct mbuf *m); + +/* + * egress path + */ + +int tcp_tx_raw_rst(struct netaddr laddr, struct netaddr raddr, tcp_seq seq); +int tcp_tx_raw_rst_ack(struct netaddr laddr, struct netaddr raddr, tcp_seq seq, tcp_seq ack); +int tcp_tx_ack(tcp_conn_t *c); +int tcp_tx_ctl(tcp_conn_t *c, uint8_t flags); +ssize_t tcp_tx_send(tcp_conn_t *c, const void *buf, size_t len, bool push); +void tcp_tx_retransmit(tcp_conn_t *c); +struct mbuf *tcp_tx_fast_retransmit_start(tcp_conn_t *c); +void tcp_tx_fast_retransmit_finish(tcp_conn_t *c, struct mbuf *m); + +/* + * utilities + */ + +/* free all mbufs in a linked list */ +static inline void mbuf_list_free(struct list_head *h) +{ + struct mbuf *m; + + while (true) { + m = list_pop(h, struct mbuf, link); + if (!m) + break; + + mbuf_free(m); + } +} + +/* + * debugging + */ + +// #if DEBUG +// extern void tcp_debug_egress_pkt(tcp_conn_t *c, struct mbuf *m); +// extern void tcp_debug_ingress_pkt(tcp_conn_t *c, struct mbuf *m); +// extern void tcp_debug_state_change(tcp_conn_t *c, int last, int next); +// #else /* DEBUG */ +static inline void tcp_debug_egress_pkt(tcp_conn_t *c, struct mbuf *m) {} +static inline void tcp_debug_ingress_pkt(tcp_conn_t *c, struct mbuf *m) {} +static inline void tcp_debug_state_change(tcp_conn_t *c, int last, int next) {} +// #endif /* DEBUG */ \ No newline at end of file diff --git a/libos/net/tcp_in.c b/libos/net/tcp_in.c new file mode 100644 index 0000000..24a44b1 --- /dev/null +++ b/libos/net/tcp_in.c @@ -0,0 +1,509 @@ +/* + * tcp_in.c - the ingress datapath for TCP + * + * Based on RFC 793 and RFC 1122 (errata). + * + * FIXME: We do too little to prevent heavy fragmentation in the out-of-order + * RX queue. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tcp.h" + +/* four cases for the acceptability test for an incoming segment */ +static bool is_acceptable(tcp_conn_t *c, uint32_t len, uint32_t seq) +{ + assert_spin_lock_held(&c->lock); + + if (len == 0 && c->pcb.rcv_wnd == 0) { + return seq == c->pcb.rcv_nxt; + } else if (len == 0 && c->pcb.rcv_wnd > 0) { + return wraps_lte(c->pcb.rcv_nxt, seq) && wraps_lt(seq, c->pcb.rcv_nxt + c->pcb.rcv_wnd); + } else if (len > 0 && c->pcb.rcv_wnd == 0) { + return false; + } + + /* (len > 0 && c->rcv_wnd > 0) */ + return (wraps_lte(c->pcb.rcv_nxt, seq) && wraps_lt(seq, c->pcb.rcv_nxt + c->pcb.rcv_wnd)) || + (wraps_lte(c->pcb.rcv_nxt, seq + len - 1) && + wraps_lt(seq + len - 1, c->pcb.rcv_nxt + c->pcb.rcv_wnd)); +} + +/* is the TX window full? 
*/ +static bool is_snd_full(tcp_conn_t *c) +{ + assert_spin_lock_held(&c->lock); + + return wraps_lte(c->pcb.snd_una + c->pcb.snd_wnd, c->pcb.snd_nxt); +} + +/* see reset generation (RFC 793) */ +static void send_rst(tcp_conn_t *c, bool acked, uint32_t seq, uint32_t ack, uint32_t len) +{ + if (acked) { + tcp_tx_raw_rst(c->e.laddr, c->e.raddr, ack); + return; + } + tcp_tx_raw_rst_ack(c->e.laddr, c->e.raddr, 0, seq + len); +} + +static void tcp_rx_append_text(tcp_conn_t *c, struct mbuf *m) +{ + uint32_t len; + + assert_spin_lock_held(&c->lock); + + /* verify assumptions enforced by acceptability testing */ + assert(wraps_lte(m->seg_seq, c->pcb.rcv_nxt)); + assert(wraps_gt(m->seg_end, c->pcb.rcv_nxt)); + + /* does the next receive octet clip the head of the text? */ + if (wraps_lt(m->seg_seq, c->pcb.rcv_nxt)) { + len = c->pcb.rcv_nxt - m->seg_seq; + mbuf_pull(m, len); + m->seg_seq += len; + } + + /* does the receive window clip the tail of the text? */ + if (wraps_lt(c->pcb.rcv_nxt + c->pcb.rcv_wnd, m->seg_end)) { + len = m->seg_end - (c->pcb.rcv_nxt + c->pcb.rcv_wnd); + mbuf_trim(m, len); + m->seg_end = c->pcb.rcv_nxt + c->pcb.rcv_wnd; + } + + /* enqueue the text */ + assert(c->pcb.rcv_wnd >= m->seg_end - m->seg_seq); + uint64_t nxt_wnd = + (uint64_t)m->seg_end | ((uint64_t)(c->pcb.rcv_wnd - (m->seg_end - m->seg_seq)) << 32); + atomic_store_rel(&c->pcb.rcv_nxt_wnd, nxt_wnd); + if (c->pcb.rcv_wnd == 0) + c->rcv_wnd_full = true; + list_add_tail(&c->rxq, &m->link); +} + +/* process RX text segments, returning true if @m is used for text */ +static bool tcp_rx_text(tcp_conn_t *c, struct mbuf *m, bool *wake) +{ + struct mbuf *pos; + + assert_spin_lock_held(&c->lock); + + /* don't accept any text if the receive window is zero */ + if (c->pcb.rcv_wnd == 0) + return false; + + if (wraps_lte(m->seg_seq, c->pcb.rcv_nxt)) { + /* we got the next in-order segment */ + if ((m->flags & (TCP_PUSH | TCP_FIN)) > 0) + *wake = true; + tcp_rx_append_text(c, m); + } else { + /* we got an out-of-order segment */ + int size = 0; + list_for_each(&c->rxq_ooo, pos, link) + { + if (wraps_lt(m->seg_seq, pos->seg_seq)) { + list_add_before(&pos->link, &m->link); + goto drain; + } else if (wraps_lte(m->seg_end, pos->seg_end)) { + return false; + } + size++; + } + + if (size >= TCP_OOO_MAX_SIZE) + return false; + + list_add_tail(&c->rxq_ooo, &m->link); + } + +drain: + /* attempt to drain the out-of-order RX queue */ + while (true) { + pos = list_top(&c->rxq_ooo, struct mbuf, link); + if (!pos) + break; + + /* has the segment been fully received already? */ + if (wraps_lte(pos->seg_end, c->pcb.rcv_nxt)) { + list_del(&pos->link); + mbuf_free(pos); + continue; + } + + /* is the segment still out-of-order? 
*/ + if (wraps_gt(pos->seg_seq, c->pcb.rcv_nxt)) + break; + + /* we got the next in-order segment */ + list_del(&pos->link); + if ((m->flags & (TCP_PUSH | TCP_FIN)) > 0) + *wake = true; + tcp_rx_append_text(c, pos); + } + + if (c->pcb.rcv_wnd == 0) + *wake = true; + + return true; +} + +/* handles ingress packets for TCP connections */ +void tcp_rx_conn(struct trans_entry *e, struct mbuf *m) +{ + tcp_conn_t *c = container_of(e, tcp_conn_t, e); + struct list_head q, waiters; + struct task *rx_task = NULL; + struct mbuf *retransmit = NULL; + const struct ip_hdr *iphdr; + const struct tcp_hdr *tcphdr; + uint32_t seq, ack, len, snd_nxt, hdr_len; + uint16_t win; + bool do_ack = false, do_drop = true; + int ret; + + list_head_init(&q); + list_head_init(&waiters); + snd_nxt = atomic_load_acq(&c->pcb.snd_nxt); + + /* find header offsets */ + iphdr = mbuf_network_hdr(m, *iphdr); + tcphdr = mbuf_pull_hdr_or_null(m, *tcphdr); + if (unlikely(!tcphdr)) { + mbuf_free(m); + return; + } + + /* parse header */ + seq = ntoh32(tcphdr->seq); + ack = ntoh32(tcphdr->ack); + win = ntoh16(tcphdr->win); + hdr_len = tcphdr->off * 4; + if (unlikely(hdr_len < sizeof(struct tcp_hdr))) { + mbuf_free(m); + return; + } + len = ntoh16(iphdr->len) - sizeof(*iphdr) - hdr_len; + if (unlikely(len > mbuf_length(m))) { + mbuf_free(m); + return; + } + if (unlikely((tcphdr->flags & TCP_FIN) > 0)) + len++; + mbuf_pull(m, hdr_len - sizeof(struct tcp_hdr)); /* strip off options */ + + spin_lock_np(&c->lock); + + if (c->pcb.state == TCP_STATE_CLOSED) { + if ((tcphdr->flags & TCP_RST) == 0) + send_rst(c, false, seq, ack, len); + goto done; + } + + if (c->pcb.state == TCP_STATE_SYN_SENT) { + if ((tcphdr->flags & TCP_ACK) > 0) { + if (wraps_lte(ack, c->pcb.iss) || wraps_gt(ack, snd_nxt)) { + send_rst(c, false, seq, ack, len); + goto done; + } + if ((tcphdr->flags & TCP_RST) > 0) { + /* check if the ack is valid */ + if (wraps_lte(c->pcb.snd_una, ack) && wraps_lte(ack, snd_nxt)) { + tcp_conn_fail(c, ECONNRESET); + goto done; + } + } + } else if ((tcphdr->flags & TCP_RST) > 0) { + goto done; + } + if ((tcphdr->flags & TCP_SYN) > 0) { + c->pcb.rcv_nxt = seq + 1; + c->pcb.irs = seq; + if ((tcphdr->flags & TCP_ACK) > 0) { + c->pcb.snd_una = ack; + tcp_conn_ack(c, &q); + } + if (wraps_gt(c->pcb.snd_una, c->pcb.iss)) { + do_ack = true; + c->pcb.snd_wnd = win > 1 ? 
win - 2 : 0; // reserve 1 byte for FIN and one byte for + // the sequence number on an RST packet + c->pcb.snd_wl1 = seq; + c->pcb.snd_wl2 = ack; + tcp_conn_set_state(c, TCP_STATE_ESTABLISHED); + } else { + ret = tcp_tx_ctl(c, TCP_SYN | TCP_ACK); + if (unlikely(ret)) { + goto done; /* feign packet loss */ + } + tcp_conn_set_state(c, TCP_STATE_SYN_RECEIVED); + } + } + goto done; + } + + /* + * TCP_STATE_SYN_RECEIVED || TCP_STATE_ESTABLISHED || + * TCP_STATE_FIN_WAIT1 || TCP_STATE_FIN_WAIT2 || + * TCP_STATE_CLOSE_WAIT || TCP_STATE_CLOSING || + * TCP_STATE_LAST_ACK || TCP_STATE_TIME_WAIT + */ + + /* step 1 - acceptability testing */ + if (!is_acceptable(c, len, seq)) { + do_ack = (tcphdr->flags & TCP_RST) == 0; + goto done; + } + + /* step 2 - RST */ + if ((tcphdr->flags & TCP_RST) > 0) { + tcp_conn_fail(c, ECONNRESET); + goto done; + } + + /* step 3 - security checks skipped */ + + /* step 4 - SYN */ + if ((tcphdr->flags & TCP_SYN) > 0) { + send_rst(c, (tcphdr->flags & TCP_ACK) > 0, seq, ack, len); + tcp_conn_fail(c, ECONNRESET); + goto done; + } + + /* step 5 - ACK */ + if ((tcphdr->flags & TCP_ACK) == 0) { + goto done; + } + if (c->pcb.state == TCP_STATE_SYN_RECEIVED) { + if (!(wraps_lte(c->pcb.snd_una, ack) && wraps_lte(ack, snd_nxt))) { + send_rst(c, true, seq, ack, len); + do_drop = true; + goto done; + } + c->pcb.snd_wnd = + win > 1 + ? win - 2 + : 0; // reserve 1 byte for FIN and one byte for the sequence number on an RST packet + c->pcb.snd_wl1 = seq; + c->pcb.snd_wl2 = ack; + tcp_conn_set_state(c, TCP_STATE_ESTABLISHED); + } + /* + * Detect a duplicate ACK if: + * 1. The ACK number is the same as the largest seen. + * 2. There is unacknowledged data pending. + * 3. There is no data payload included with the ACK. + * 4. There is no window update. + */ + if (ack == c->pcb.snd_una && c->pcb.snd_una != c->pcb.snd_nxt && len == 0) { + c->rep_acks++; + if (c->rep_acks >= TCP_FAST_RETRANSMIT_THRESH) { + if (c->tx_exclusive) { + c->do_fast_retransmit = true; + c->fast_retransmit_last_ack = ack; + } else { + retransmit = tcp_tx_fast_retransmit_start(c); + } + c->rep_acks = 0; + } + } + bool snd_was_full = is_snd_full(c); + if (wraps_lte(c->pcb.snd_una, ack) && wraps_lte(ack, snd_nxt)) { + if (c->pcb.snd_una != ack) + c->rep_acks = 0; + c->pcb.snd_una = ack; + tcp_conn_ack(c, &q); + } else if (wraps_gt(ack, snd_nxt)) { + do_ack = true; + goto done; + } + /* should we update the send window? */ + if (wraps_lt(c->pcb.snd_wl1, seq) || + (c->pcb.snd_wl1 == seq && wraps_lte(c->pcb.snd_wl2, ack))) { + c->pcb.snd_wnd = + win > 1 + ? 
win - 2 + : 0; // reserve 1 byte for FIN and one byte for the sequence number on an RST packet + c->pcb.snd_wl1 = seq; + c->pcb.snd_wl2 = ack; + c->rep_acks = 0; + } + if (snd_was_full && !is_snd_full(c)) + waitq_release_start(&c->tx_wq, &waiters); + + if (c->pcb.state == TCP_STATE_FIN_WAIT1 && c->pcb.snd_una == snd_nxt) { + tcp_conn_set_state(c, TCP_STATE_FIN_WAIT2); + } else if (c->pcb.state == TCP_STATE_CLOSING && c->pcb.snd_una == snd_nxt) { + c->time_wait_ts = now_us(); + tcp_conn_set_state(c, TCP_STATE_TIME_WAIT); + } else if (c->pcb.state == TCP_STATE_LAST_ACK && c->pcb.snd_una == snd_nxt) { + tcp_conn_set_state(c, TCP_STATE_CLOSED); + tcp_conn_put(c); /* safe because RCU + preempt is disabled */ + goto done; + } + + /* step 6 - URG support skipped */ + + /* step 7 - segment text */ + if (len > 0 && (c->pcb.state == TCP_STATE_ESTABLISHED || c->pcb.state == TCP_STATE_FIN_WAIT1 || + c->pcb.state == TCP_STATE_FIN_WAIT2)) { + bool wake = false; + m->seg_seq = seq; + m->seg_end = seq + len; + m->flags = tcphdr->flags; + +#ifdef TCP_RX_STATS + uint64_t before_tsc = rdtsc(); + do_drop = !tcp_rx_text(c, m, &wake); + STAT(RX_TCP_TEXT_CYCLES) += rdtsc() - before_tsc; +#else + do_drop = !tcp_rx_text(c, m, &wake); +#endif + + if (wake) { + assert(!list_empty(&c->rxq)); + assert(do_drop == false); + rx_task = waitq_signal(&c->rx_wq, &c->lock); + } + if (!c->ack_delayed) { + c->ack_delayed = true; + c->ack_ts = now_us(); + } + do_ack |= !list_empty(&c->rxq_ooo); + } + + /* step 8 - FIN */ + if (likely((tcphdr->flags & TCP_FIN) == 0)) + goto done; + if (c->pcb.state == TCP_STATE_SYN_RECEIVED || c->pcb.state == TCP_STATE_ESTABLISHED) { + tcp_conn_set_state(c, TCP_STATE_CLOSE_WAIT); + } else if (c->pcb.state == TCP_STATE_FIN_WAIT1) { + assert(c->pcb.snd_una != snd_nxt); + tcp_conn_set_state(c, TCP_STATE_CLOSING); + } else if (c->pcb.state == TCP_STATE_FIN_WAIT2) { + c->time_wait_ts = now_us(); + do_ack = true; + tcp_conn_set_state(c, TCP_STATE_TIME_WAIT); + } + +done: + tcp_timer_update(c); + tcp_debug_ingress_pkt(c, m); + spin_unlock_np(&c->lock); + + /* deferred work (delayed until after the lock was dropped) */ + waitq_release_finish(&waiters); + if (rx_task) + waitq_signal_finish(rx_task); + mbuf_list_free(&q); + tcp_tx_fast_retransmit_finish(c, retransmit); + if (do_ack) + tcp_tx_ack(c); + if (do_drop) + mbuf_free(m); +} + +/* handles ingress packets for TCP listener queues */ +tcp_conn_t *tcp_rx_listener(struct netaddr laddr, struct mbuf *m) +{ + struct netaddr raddr; + const struct ip_hdr *iphdr; + const struct tcp_hdr *tcphdr; + tcp_conn_t *c; + int ret; + + /* find header offsets */ + iphdr = mbuf_network_hdr(m, *iphdr); + tcphdr = mbuf_pull_hdr_or_null(m, *tcphdr); + if (unlikely(!tcphdr)) + return NULL; + + /* calculate local and remote network addresses */ + raddr.ip = ntoh32(iphdr->saddr); + raddr.port = ntoh16(tcphdr->sport); + + /* do exactly what RFC 793 says */ + if ((tcphdr->flags & TCP_RST) > 0) + return NULL; + if ((tcphdr->flags & TCP_ACK) > 0) { + tcp_tx_raw_rst(laddr, raddr, ntoh32(tcphdr->ack)); + return NULL; + } + if ((tcphdr->flags & TCP_SYN) == 0) + return NULL; + + /* TODO: the spec requires us to enqueue but not post any data */ + if (ntoh16(iphdr->len) - sizeof(*iphdr) != tcphdr->off * 4) + return NULL; + + /* we have a valid SYN packet, initialize a new connection */ + c = tcp_conn_alloc(); + if (unlikely(!c)) + return NULL; + c->pcb.irs = ntoh32(tcphdr->seq); + c->pcb.rcv_nxt = c->pcb.irs + 1; + + /* + * attach the connection to the transport layer. 
From this point onward + * ingress packets can be dispatched to the connection. + */ + ret = tcp_conn_attach(c, laddr, raddr); + if (unlikely(ret)) { + sfree(c); + return NULL; + } + tcp_debug_ingress_pkt(c, m); + + /* finally, send a SYN/ACK to the remote host */ + spin_lock_np(&c->lock); + ret = tcp_tx_ctl(c, TCP_SYN | TCP_ACK); + if (unlikely(ret)) { + spin_unlock_np(&c->lock); + tcp_conn_destroy(c); + return NULL; + } + tcp_conn_get(c); /* take a ref for the state machine */ + tcp_conn_set_state(c, TCP_STATE_SYN_RECEIVED); + spin_unlock_np(&c->lock); + + return c; +} + +void tcp_rx_closed(struct mbuf *m) +{ + struct netaddr l, r; + uint32_t len; + const struct ip_hdr *iphdr; + const struct tcp_hdr *tcphdr; + + iphdr = mbuf_network_hdr(m, *iphdr); + tcphdr = mbuf_pull_hdr_or_null(m, *tcphdr); + if (!tcphdr) + return; + + if ((tcphdr->flags & TCP_RST) > 0) + return; + + l.ip = ntoh32(iphdr->daddr); + l.port = ntoh16(tcphdr->dport); + + r.ip = ntoh32(iphdr->saddr); + r.port = ntoh16(tcphdr->sport); + + if ((tcphdr->flags & TCP_ACK) > 0) { + tcp_tx_raw_rst(l, r, ntoh32(tcphdr->ack)); + } else { + len = ntoh16(iphdr->len) - sizeof(*iphdr) - tcphdr->off * 4; + tcp_tx_raw_rst_ack(l, r, 0, ntoh32(tcphdr->seq) + len); + } +} diff --git a/libos/net/tcp_out.c b/libos/net/tcp_out.c new file mode 100644 index 0000000..5079da2 --- /dev/null +++ b/libos/net/tcp_out.c @@ -0,0 +1,389 @@ +/* + * tcp_out.c - the egress datapath for TCP + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tcp.h" + +static void tcp_tx_release_mbuf(struct mbuf *m) +{ + if (atomic_dec_zero(&m->ref)) + net_tx_release_mbuf(m); +} + +static struct tcp_hdr *tcp_push_tcphdr(struct mbuf *m, tcp_conn_t *c, uint8_t flags, uint16_t l4len) +{ + struct tcp_hdr *tcphdr; + uint64_t rcv_nxt_wnd = atomic_load_acq(&c->pcb.rcv_nxt_wnd); + tcp_seq ack = c->tx_last_ack = (uint32_t)rcv_nxt_wnd; + uint16_t win = c->tx_last_win = rcv_nxt_wnd >> 32; + + /* write the tcp header */ + tcphdr = mbuf_push_hdr(m, *tcphdr); + mbuf_mark_transport_offset(m); + tcphdr->sport = hton16(c->e.laddr.port); + tcphdr->dport = hton16(c->e.raddr.port); + tcphdr->ack = hton32(ack); + tcphdr->off = 5; + tcphdr->flags = flags; + tcphdr->win = hton16(win); + tcphdr->seq = hton32(m->seg_seq); + tcphdr->sum = + ipv4_phdr_cksum(IPPROTO_TCP, c->e.laddr.ip, c->e.raddr.ip, sizeof(struct tcp_hdr) + l4len); + return tcphdr; +} + +/** + * tcp_tx_raw_rst - send a RST without an established connection + * @laddr: the local address + * @raddr: the remote address + * @seq: the segement's sequence number + * + * Returns 0 if successful, otherwise fail. 
+ */ +int tcp_tx_raw_rst(struct netaddr laddr, struct netaddr raddr, tcp_seq seq) +{ + struct tcp_hdr *tcphdr; + struct mbuf *m; + int ret; + + m = net_tx_alloc_mbuf(); + if (unlikely((!m))) + return -ENOMEM; + + m->txflags = OLFLAG_TCP_CHKSUM; + + /* write the tcp header */ + tcphdr = mbuf_push_hdr(m, *tcphdr); + tcphdr->sport = hton16(laddr.port); + tcphdr->dport = hton16(raddr.port); + tcphdr->seq = hton32(seq); + tcphdr->ack = hton32(0); + tcphdr->off = 5; + tcphdr->flags = TCP_RST; + tcphdr->win = hton16(0); + tcphdr->sum = ipv4_phdr_cksum(IPPROTO_TCP, laddr.ip, raddr.ip, sizeof(struct tcp_hdr)); + + /* transmit packet */ + ret = net_tx_ip(m, IPPROTO_TCP, raddr.ip); + if (unlikely(ret)) + mbuf_free(m); + return ret; +} + +/** + * tcp_tx_raw_rst_ack - send a RST/ACK without an established connection + * @laddr: the local address + * @raddr: the remote address + * @seq: the segment's sequence number + * @ack: the segment's acknowledgement number + * + * Returns 0 if successful, otherwise fail. + */ +int tcp_tx_raw_rst_ack(struct netaddr laddr, struct netaddr raddr, tcp_seq seq, tcp_seq ack) +{ + struct tcp_hdr *tcphdr; + struct mbuf *m; + int ret; + + m = net_tx_alloc_mbuf(); + if (unlikely((!m))) + return -ENOMEM; + + m->txflags = OLFLAG_TCP_CHKSUM; + + /* write the tcp header */ + tcphdr = mbuf_push_hdr(m, *tcphdr); + tcphdr->sport = hton16(laddr.port); + tcphdr->dport = hton16(raddr.port); + tcphdr->seq = hton32(seq); + tcphdr->ack = hton32(ack); + tcphdr->off = 5; + tcphdr->flags = TCP_RST | TCP_ACK; + tcphdr->win = hton16(0); + tcphdr->sum = ipv4_phdr_cksum(IPPROTO_TCP, laddr.ip, raddr.ip, sizeof(struct tcp_hdr)); + + /* transmit packet */ + ret = net_tx_ip(m, IPPROTO_TCP, raddr.ip); + if (unlikely(ret)) + mbuf_free(m); + return ret; +} + +/** + * tcp_tx_ack - send an acknowledgement and window update packet + * @c: the connection to send the ACK + * + * Returns 0 if succesful, otherwise fail. + */ +int tcp_tx_ack(tcp_conn_t *c) +{ + struct mbuf *m; + int ret; + + m = net_tx_alloc_mbuf(); + if (unlikely(!m)) + return -ENOMEM; + + m->txflags = OLFLAG_TCP_CHKSUM; + m->seg_seq = atomic_load_acq(&c->pcb.snd_nxt); + tcp_push_tcphdr(m, c, TCP_ACK, 0); + + /* transmit packet */ + tcp_debug_egress_pkt(c, m); + ret = net_tx_ip(m, IPPROTO_TCP, c->e.raddr.ip); + if (unlikely(ret)) + mbuf_free(m); + return ret; +} + +/** + * tcp_tx_ctl - sends a control message without data + * @c: the TCP connection + * @flags: the control flags (e.g. TCP_SYN, TCP_FIN, etc.) + * + * WARNING: The caller must have write exclusive access to the socket or hold + * @c->lock while write exclusion isn't taken. + * + * Returns 0 if successful, -ENOMEM if out memory. 
+ */ +int tcp_tx_ctl(tcp_conn_t *c, uint8_t flags) +{ + struct mbuf *m; + int ret; + + BUG_ON(!c->tx_exclusive && !spin_lock_held(&c->lock)); + + m = net_tx_alloc_mbuf(); + if (unlikely(!m)) + return -ENOMEM; + + m->txflags = OLFLAG_TCP_CHKSUM; + m->seg_seq = c->pcb.snd_nxt; + m->seg_end = c->pcb.snd_nxt + 1; + m->flags = flags; + tcp_push_tcphdr(m, c, flags, 0); + atomic_store_rel(&c->pcb.snd_nxt, c->pcb.snd_nxt + 1); + list_add_tail(&c->txq, &m->link); + m->timestamp = now_us(); + atomic_store(&m->ref, 2); + m->release = tcp_tx_release_mbuf; + tcp_debug_egress_pkt(c, m); + ret = net_tx_ip(m, IPPROTO_TCP, c->e.raddr.ip); + if (unlikely(ret)) { + /* pretend the packet was sent */ + atomic_store(&m->ref, 1); + } + return ret; +} + +/** + * tcp_tx_send - transmit a buffer on a TCP connection + * @c: the TCP connection + * @buf: the buffer to transmit + * @len: the length of the buffer to transmit + * @push: indicates the data is ready for consumption by the receiver + * + * If @push is false, the implementation may buffer some or all of the data for + * future transmission. + * + * WARNING: The caller is responsible for respecting the TCP window size limit. + * WARNING: The caller must have write exclusive access to the socket or hold + * @c->lock while write exclusion isn't taken. + * + * Returns the number of bytes transmitted, or < 0 if there was an error. + */ +ssize_t tcp_tx_send(tcp_conn_t *c, const void *buf, size_t len, bool push) +{ + struct mbuf *m; + const char *pos = buf; + const char *end = pos + len; + ssize_t ret = 0; + size_t seglen; + + assert(c->pcb.state >= TCP_STATE_ESTABLISHED); + assert((c->tx_exclusive == true) || spin_lock_held(&c->lock)); + + pos = buf; + end = pos + len; + + /* the main TCP segmenter loop */ + while (pos < end) { + /* allocate a buffer and copy payload data */ + if (c->tx_pending) { + m = c->tx_pending; + c->tx_pending = NULL; + seglen = MIN(end - pos, TCP_MSS - mbuf_length(m)); + m->seg_end += seglen; + } else { + m = net_tx_alloc_mbuf(); + if (unlikely(!m)) { + ret = -ENOBUFS; + break; + } + seglen = MIN(end - pos, TCP_MSS); + m->seg_seq = c->pcb.snd_nxt; + m->seg_end = c->pcb.snd_nxt + seglen; + m->flags = TCP_ACK; + atomic_store(&m->ref, 2); + m->release = tcp_tx_release_mbuf; + } + + memcpy(mbuf_put(m, seglen), pos, seglen); + atomic_store_rel(&c->pcb.snd_nxt, c->pcb.snd_nxt + seglen); + pos += seglen; + + /* if not pushing, keep the last buffer for later */ + if (!push && pos == end && mbuf_length(m) - sizeof(struct tcp_hdr) < TCP_MSS) { + c->tx_pending = m; + break; + } + + /* initialize TCP header */ + if (push && pos == end) + m->flags |= TCP_PUSH; + tcp_push_tcphdr(m, c, m->flags, m->seg_end - m->seg_seq); + + /* transmit the packet */ + list_add_tail(&c->txq, &m->link); + tcp_debug_egress_pkt(c, m); + m->timestamp = now_us(); + m->txflags = OLFLAG_TCP_CHKSUM; + ret = net_tx_ip(m, IPPROTO_TCP, c->e.raddr.ip); + if (unlikely(ret)) { + /* pretend the packet was sent */ + atomic_store(&m->ref, 1); + } + } + + /* if we sent anything return the length we sent instead of an error */ + if (pos - (const char *)buf > 0) + ret = pos - (const char *)buf; + return ret; +} + +static int tcp_tx_retransmit_one(tcp_conn_t *c, struct mbuf *m) +{ + int ret; + uint16_t l4len; + + l4len = m->seg_end - m->seg_seq; + if (m->flags & (TCP_SYN | TCP_FIN)) + l4len--; + + /* + * Check if still transmitting. Because of a limitation in some DPDK NIC + * drivers, completions could be delayed long after transmission is + * finished. 
We copy the packet to allow retransmission to still succeed + * in such corner cases. + */ + if (unlikely(atomic_load(&m->ref) != 1)) { + struct mbuf *newm = net_tx_alloc_mbuf(); + if (unlikely(!newm)) + return -ENOMEM; + memcpy(mbuf_put(newm, l4len), mbuf_transport_offset(m) + sizeof(struct tcp_hdr), l4len); + newm->flags = m->flags; + newm->seg_seq = m->seg_seq; + newm->seg_end = m->seg_end; + newm->txflags = OLFLAG_TCP_CHKSUM; + m = newm; + } else { + /* strip headers and reset ref count */ + mbuf_reset(m, m->transport_off + sizeof(struct tcp_hdr)); + atomic_store(&m->ref, 2); + } + + /* handle a partially acknowledged packet */ + uint32_t una = atomic_load_acq(&c->pcb.snd_una); + if (unlikely(wraps_lte(m->seg_end, una))) { + mbuf_free(m); + return 0; + } else if (unlikely(wraps_lt(m->seg_seq, una))) { + mbuf_pull(m, una - m->seg_seq); + m->seg_seq = una; + } + + /* push the TCP header back on (now with fresher ack) */ + tcp_push_tcphdr(m, c, m->flags, l4len); + + /* transmit the packet */ + tcp_debug_egress_pkt(c, m); + ret = net_tx_ip(m, IPPROTO_TCP, c->e.raddr.ip); + if (unlikely(ret)) + mbuf_free(m); + return ret; +} + +/** + * tcp_tx_fast_retransmit - resend the first pending egress packet + * @c: the TCP connection in which to send retransmissions + */ +struct mbuf *tcp_tx_fast_retransmit_start(tcp_conn_t *c) +{ + struct mbuf *m; + + assert_spin_lock_held(&c->lock); + + if (c->tx_exclusive) + return NULL; + + m = list_top(&c->txq, struct mbuf, link); + if (m) { + m->timestamp = now_us(); + atomic_inc(&m->ref); + } + + return m; +} + +void tcp_tx_fast_retransmit_finish(tcp_conn_t *c, struct mbuf *m) +{ + if (m) { + tcp_tx_retransmit_one(c, m); + mbuf_free(m); + } +} + +/** + * tcp_tx_retransmit - resend any pending egress packets that timed out + * @c: the TCP connection in which to send retransmissions + */ +void tcp_tx_retransmit(tcp_conn_t *c) +{ + struct mbuf *m; + uint64_t now = now_us(); + + assert(spin_lock_held(&c->lock) || c->tx_exclusive); + + int ret; + + int count = 0; + list_for_each(&c->txq, m, link) + { + /* check if the timeout expired */ + if (now - m->timestamp < TCP_RETRANSMIT_TIMEOUT) + break; + + if (wraps_gte(atomic_load_acq(&c->pcb.snd_una), m->seg_end)) + continue; + + m->timestamp = now; + ret = tcp_tx_retransmit_one(c, m); + if (ret) + break; + + if (++count >= TCP_RETRANSMIT_BATCH) + break; + } +} diff --git a/libos/net/transport.c b/libos/net/transport.c new file mode 100644 index 0000000..018988d --- /dev/null +++ b/libos/net/transport.c @@ -0,0 +1,253 @@ +/* + * transport.c - handles transport protocol packets (UDP and TCP) + */ + +#include + +#include +#include +#include +#include +#include + +#define TRANS_TBL_SIZE 16384 + +/* ephemeral port definitions (IANA suggested range) */ +#define MIN_EPHEMERAL 49152 +#define MAX_EPHEMERAL 65535 + +/* a seed value for transport handler table hashing calculations */ +static uint32_t trans_seed; + +/* a simple counter used to further randomize ephemeral ports */ +static uint32_t ephemeral_offset; + +static inline uint32_t trans_hash_3tuple(uint8_t proto, struct netaddr laddr) +{ + return hash_crc32c_one(trans_seed, (uint64_t)laddr.ip | ((uint64_t)laddr.port << 32) | + ((uint64_t)proto << 48)); +} + +static inline uint32_t trans_hash_5tuple(uint8_t proto, struct netaddr laddr, struct netaddr raddr) +{ + return hash_crc32c_two(trans_seed, (uint64_t)laddr.ip | ((uint64_t)laddr.port << 32), + (uint64_t)raddr.ip | ((uint64_t)raddr.port << 32) | + ((uint64_t)proto << 48)); +} + +static DEFINE_SPINLOCK(trans_lock); 
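+/* + * Note: @trans_tbl is an RCU hash table. Writers insert and remove entries + * under @trans_lock, while the RX path walks the buckets under + * rcu_read_lock() in trans_lookup(). + */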
+static struct rcu_hlist_head trans_tbl[TRANS_TBL_SIZE]; + +/** + * trans_table_add - adds an entry to the match table + * @e: the entry to add + * + * Returns 0 if successful, or -EADDRINUSE if a conflicting entry is already in + * the table, or -EINVAL if the local port is zero. + */ +int trans_table_add(struct trans_entry *e) +{ + struct trans_entry *pos; + struct rcu_hlist_node *node; + uint32_t idx; + + /* port zero is reserved for ephemeral port auto-assign */ + if (e->laddr.port == 0) + return -EINVAL; + + assert(e->match == TRANS_MATCH_3TUPLE || e->match == TRANS_MATCH_5TUPLE); + if (e->match == TRANS_MATCH_3TUPLE) + idx = trans_hash_3tuple(e->proto, e->laddr); + else + idx = trans_hash_5tuple(e->proto, e->laddr, e->raddr); + idx %= TRANS_TBL_SIZE; + + spin_lock_np(&trans_lock); + rcu_hlist_for_each(&trans_tbl[idx], node, true) + { + pos = rcu_hlist_entry(node, struct trans_entry, link); + if (pos->match != e->match) + continue; + if (e->match == TRANS_MATCH_3TUPLE && e->proto == pos->proto && + e->laddr.ip == pos->laddr.ip && e->laddr.port == pos->laddr.port) { + spin_unlock_np(&trans_lock); + return -EADDRINUSE; + } else if (e->proto == pos->proto && e->laddr.ip == pos->laddr.ip && + e->laddr.port == pos->laddr.port && e->raddr.ip == pos->raddr.ip && + e->raddr.port == pos->raddr.port) { + spin_unlock_np(&trans_lock); + return -EADDRINUSE; + } + } + rcu_hlist_add_head(&trans_tbl[idx], &e->link); + atomic_store_rel(&ephemeral_offset, ephemeral_offset + 1); + spin_unlock_np(&trans_lock); + + return 0; +} + +/** + * trans_table_add_with_ephemeral_port - adds an entry to the match table + * while automatically selecting the local port number + * @e: the entry to add + * + * We use algorithm 3 from RFC 6056. + * + * Returns 0 if successful or -EADDRNOTAVAIL if all ports are taken. + */ +int trans_table_add_with_ephemeral_port(struct trans_entry *e) +{ + uint16_t offset, next_ephemeral = 0; + uint16_t num_ephemeral = MAX_EPHEMERAL - MIN_EPHEMERAL + 1; + int ret; + + if (e->match != TRANS_MATCH_5TUPLE) + return -EINVAL; + + e->laddr.port = 0; + offset = trans_hash_5tuple(e->proto, e->laddr, e->raddr) + atomic_load_acq(&ephemeral_offset); + while (next_ephemeral < num_ephemeral) { + uint32_t port = MIN_EPHEMERAL + (next_ephemeral++ + offset) % num_ephemeral; + e->laddr.port = port; + ret = trans_table_add(e); + if (!ret) + return 0; + } + + return -EADDRNOTAVAIL; +} + +/** + * trans_table_remove - removes an entry from the match table + * @e: the entry to remove + * + * The caller is responsible for eventually freeing the object with rcu_free(). 
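+ *
+ * Removal only unlinks the entry from its bucket; concurrent receivers may
+ * still hold an RCU reference until a grace period elapses, which is why the
+ * memory must be reclaimed with rcu_free() rather than freed immediately.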
+ */ +void trans_table_remove(struct trans_entry *e) +{ + spin_lock_np(&trans_lock); + rcu_hlist_del(&e->link); + spin_unlock_np(&trans_lock); +} + +/* the first 4 bytes are identical for TCP and UDP */ +struct l4_hdr { + uint16_t sport, dport; +}; + +static struct trans_entry *trans_lookup(struct mbuf *m) +{ + const struct ip_hdr *iphdr; + const struct l4_hdr *l4hdr; + struct trans_entry *e; + struct rcu_hlist_node *node; + struct netaddr laddr, raddr; + uint32_t hash; + + assert(rcu_read_lock_held()); + + /* set up the network header pointers */ + mbuf_mark_transport_offset(m); + iphdr = mbuf_network_hdr(m, *iphdr); + if (unlikely(iphdr->proto != IPPROTO_UDP && iphdr->proto != IPPROTO_TCP)) + return NULL; + l4hdr = (struct l4_hdr *)mbuf_data(m); + if (unlikely(mbuf_length(m) < sizeof(*l4hdr))) + return NULL; + + /* parse the source and destination network address */ + laddr.ip = ntoh32(iphdr->daddr); + laddr.port = ntoh16(l4hdr->dport); + raddr.ip = ntoh32(iphdr->saddr); + raddr.port = ntoh16(l4hdr->sport); + + /* attempt to find a 5-tuple match */ + hash = trans_hash_5tuple(iphdr->proto, laddr, raddr); + rcu_hlist_for_each(&trans_tbl[hash % TRANS_TBL_SIZE], node, false) + { + e = rcu_hlist_entry(node, struct trans_entry, link); + if (e->match != TRANS_MATCH_5TUPLE) + continue; + if (e->proto == iphdr->proto && e->laddr.ip == laddr.ip && e->laddr.port == laddr.port && + e->raddr.ip == raddr.ip && e->raddr.port == raddr.port) { + return e; + } + } + + /* attempt to find a 3-tuple match */ + hash = trans_hash_3tuple(iphdr->proto, laddr); + rcu_hlist_for_each(&trans_tbl[hash % TRANS_TBL_SIZE], node, false) + { + e = rcu_hlist_entry(node, struct trans_entry, link); + if (e->match != TRANS_MATCH_3TUPLE) + continue; + if (e->proto == iphdr->proto && e->laddr.ip == laddr.ip && e->laddr.port == laddr.port) { + return e; + } + } + + return NULL; +} + +/** + * net_rx_trans - receive L4 packets + * @ms: an array of mbufs to process + * @nr: the size of the @ms array + */ +void net_rx_trans(struct mbuf **ms, int nr) +{ + int i; + const struct ip_hdr *iphdr; + + /* deliver each packet to a L4 protocol handler */ + for (i = 0; i < nr; i++) { + struct mbuf *m = ms[i]; + struct trans_entry *e; + + rcu_read_lock(); + e = trans_lookup(m); + if (unlikely(!e)) { + rcu_read_unlock(); + iphdr = mbuf_network_hdr(m, *iphdr); + if (iphdr->proto == IPPROTO_TCP) + tcp_rx_closed(m); + mbuf_free(m); + continue; + } + e->ops->recv(e, m); + rcu_read_unlock(); + } +} + +/** + * trans_error - reports a network error to the L4 layer + * @m: the mbuf that triggered the error + * @err: the suggested ernno to report + */ +void trans_error(struct mbuf *m, int err) +{ + struct trans_entry *e; + + rcu_read_lock(); + e = trans_lookup(m); + if (e && e->ops->err) + e->ops->err(e, err); + rcu_read_unlock(); +} + +/** + * trans_init - initializes transport protocol infrastructure + * + * Returns 0 (always successful). 
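+ *
+ * The hash functions are seeded with a per-process value (mixed with the
+ * PID), so bucket placement and ephemeral port selection are not predictable
+ * across processes.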
+ */ +int trans_init(void) +{ + int i; + + spin_lock_init(&trans_lock); + + for (i = 0; i < TRANS_TBL_SIZE; i++) rcu_hlist_init_head(&trans_tbl[i]); + + trans_seed = rand_crc32c(0x48FA8BC1 ^ (uint32_t)proc->pid); + return 0; +} diff --git a/libos/net/udp.c b/libos/net/udp.c new file mode 100644 index 0000000..3910b0c --- /dev/null +++ b/libos/net/udp.c @@ -0,0 +1,686 @@ +/* + * udp.c - support for User Datagram Protocol (UDP) + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "waitq.h" + +#define UDP_IN_DEFAULT_CAP 512 +#define UDP_OUT_DEFAULT_CAP 2048 + +static int udp_send_raw(struct mbuf *m, size_t len, struct netaddr laddr, struct netaddr raddr) +{ + struct udp_hdr *udphdr; + + /* write UDP header */ + udphdr = mbuf_push_hdr(m, *udphdr); + udphdr->src_port = hton16(laddr.port); + udphdr->dst_port = hton16(raddr.port); + udphdr->len = hton16(len + sizeof(*udphdr)); + udphdr->cksum = 0; + + /* send the IP packet */ + return net_tx_ip(m, IPPROTO_UDP, raddr.ip); +} + +/* + * UDP Socket Support + */ + +struct udp_conn { + struct trans_entry e; + bool shutdown; + + /* ingress support */ + spinlock_t inq_lock; + int inq_cap; + int inq_len; + int inq_err; + waitq_t inq_wq; + struct mbufq inq; + + /* egress support */ + spinlock_t outq_lock; + bool outq_free; + int outq_cap; + int outq_len; + waitq_t outq_wq; +}; + +/* handles ingress packets for UDP sockets */ +static void udp_conn_recv(struct trans_entry *e, struct mbuf *m) +{ + udp_conn_t *c = container_of(e, udp_conn_t, e); + struct task *t; + + if (unlikely(!mbuf_pull_hdr_or_null(m, sizeof(struct udp_hdr)))) { + mbuf_free(m); + return; + } + + spin_lock_np(&c->inq_lock); + /* drop packet if the ingress queue is full */ + if (c->inq_len >= c->inq_cap || c->inq_err || c->shutdown) { + spin_unlock_np(&c->inq_lock); + mbuf_free(m); + return; + } + + /* enqueue the packet on the ingress queue */ + mbufq_push_tail(&c->inq, m); + c->inq_len++; + + /* wake up a waiter */ + t = waitq_signal(&c->inq_wq, &c->inq_lock); + spin_unlock_np(&c->inq_lock); + + waitq_signal_finish(t); +} + +/* handles network errors for UDP sockets */ +static void udp_conn_err(struct trans_entry *e, int err) +{ + udp_conn_t *c = container_of(e, udp_conn_t, e); + + bool do_release; + + spin_lock_np(&c->inq_lock); + do_release = !c->inq_err && !c->shutdown; + c->inq_err = err; + spin_unlock_np(&c->inq_lock); + + if (do_release) + waitq_release(&c->inq_wq); +} + +/* operations for UDP sockets */ +const struct trans_ops udp_conn_ops = { + .recv = udp_conn_recv, + .err = udp_conn_err, +}; + +static void udp_init_conn(udp_conn_t *c) +{ + c->shutdown = false; + + /* initialize ingress fields */ + spin_lock_init(&c->inq_lock); + c->inq_cap = UDP_IN_DEFAULT_CAP; + c->inq_len = 0; + c->inq_err = 0; + waitq_init(&c->inq_wq); + mbufq_init(&c->inq); + + /* initialize egress fields */ + spin_lock_init(&c->outq_lock); + c->outq_free = false; + c->outq_cap = UDP_OUT_DEFAULT_CAP; + c->outq_len = 0; + waitq_init(&c->outq_wq); +} + +static void udp_finish_release_conn(struct rcu_head *h) +{ + udp_conn_t *c = container_of(h, udp_conn_t, e.rcu); + sfree(c); +} + +static void udp_release_conn(udp_conn_t *c) +{ + assert(waitq_empty(&c->inq_wq) && waitq_empty(&c->outq_wq)); + assert(mbufq_empty(&c->inq)); + rcu_free(&c->e.rcu, udp_finish_release_conn); +} + +/** + * udp_dial - creates a UDP socket between a local and remote address + * @laddr: the local UDP address + * @raddr: the remote UDP address + * @c_out: a pointer to 
store the UDP socket (if successful) + * + * Returns 0 if success, otherwise fail. + */ +int udp_dial(struct netaddr laddr, struct netaddr raddr, udp_conn_t **c_out) +{ + udp_conn_t *c; + int ret; + + /* only can support one local IP so far */ + if (laddr.ip == 0) + laddr.ip = io->addr; + else if (laddr.ip != io->addr) + return -EINVAL; + + c = smalloc(sizeof(*c)); + if (!c) + return -ENOMEM; + + udp_init_conn(c); + trans_init_5tuple(&c->e, IPPROTO_UDP, &udp_conn_ops, laddr, raddr); + + if (laddr.port == 0) + ret = trans_table_add_with_ephemeral_port(&c->e); + else + ret = trans_table_add(&c->e); + if (ret) { + sfree(c); + return ret; + } + + *c_out = c; + return 0; +} + +/** + * udp_listen - creates a UDP socket listening to a local address + * @laddr: the local UDP address + * @c_out: a pointer to store the UDP socket (if successful) + * + * Returns 0 if success, otherwise fail. + */ +int udp_listen(struct netaddr laddr, udp_conn_t **c_out) +{ + udp_conn_t *c; + int ret; + + /* only can support one local IP so far */ + if (laddr.ip == 0) + laddr.ip = io->addr; + else if (laddr.ip != io->addr) + return -EINVAL; + + c = smalloc(sizeof(*c)); + if (!c) + return -ENOMEM; + + udp_init_conn(c); + trans_init_3tuple(&c->e, IPPROTO_UDP, &udp_conn_ops, laddr); + + ret = trans_table_add(&c->e); + if (ret) { + sfree(c); + return ret; + } + + *c_out = c; + return 0; +} + +/** + * udp_local_addr - gets the local address of the socket + * @c: the UDP socket + */ +struct netaddr udp_local_addr(udp_conn_t *c) +{ + return c->e.laddr; +} + +/** + * udp_remote_addr - gets the remote address of the socket + * @c: the UDP socket + * + * A UDP socket may not have a remote address attached. If so, the IP and + * port will be set to zero. + */ +struct netaddr udp_remote_addr(udp_conn_t *c) +{ + return c->e.raddr; +} + +/** + * udp_set_buffers - changes send and receive buffer sizes + * @c: the UDP socket + * @read_mbufs: the maximum number of read mbufs to buffer + * @write_mbufs: the maximum number of write mbufs to buffer + * + * Returns 0 if the inputs were valid. + */ +int udp_set_buffers(udp_conn_t *c, int read_mbufs, int write_mbufs) +{ + c->inq_cap = read_mbufs; + c->outq_cap = write_mbufs; + + /* TODO: free mbufs that go over new limits? */ + return 0; +} + +/** + * udp_read_from - reads from a UDP socket + * @c: the UDP socket + * @buf: a buffer to store the datagram + * @len: the size of @buf + * @raddr: a pointer to store the remote address of the datagram (if not NULL) + * + * WARNING: This a blocking function. It will wait until a datagram is + * available, an error occurs, or the socket is shutdown. + * + * Returns the number of bytes in the datagram, or @len if the datagram + * is >= @len in size. If the socket has been shutdown, returns 0. + */ +ssize_t udp_read_from(udp_conn_t *c, void *buf, size_t len, struct netaddr *raddr) +{ + ssize_t ret; + struct mbuf *m; + + spin_lock_np(&c->inq_lock); + + /* block until there is an actionable event */ + while (mbufq_empty(&c->inq) && !c->inq_err && !c->shutdown) + waitq_wait(&c->inq_wq, &c->inq_lock); + + /* is the socket drained and shutdown? 
*/ + if (mbufq_empty(&c->inq) && c->shutdown) { + spin_unlock_np(&c->inq_lock); + return 0; + } + + /* propagate error status code if an error was detected */ + if (c->inq_err) { + spin_unlock_np(&c->inq_lock); + return -c->inq_err; + } + + /* pop an mbuf and deliver the payload */ + m = mbufq_pop_head(&c->inq); + c->inq_len--; + spin_unlock_np(&c->inq_lock); + + ret = MIN(len, mbuf_length(m)); + memcpy(buf, mbuf_data(m), ret); + if (raddr) { + struct ip_hdr *iphdr = mbuf_network_hdr(m, *iphdr); + struct udp_hdr *udphdr = mbuf_transport_hdr(m, *udphdr); + raddr->ip = ntoh32(iphdr->saddr); + raddr->port = ntoh16(udphdr->src_port); + if (c->e.match == TRANS_MATCH_5TUPLE) { + assert(c->e.raddr.ip == raddr->ip && c->e.raddr.port == raddr->port); + } + } + mbuf_free(m); + return ret; +} + +static void udp_tx_release_mbuf(struct mbuf *m) +{ + udp_conn_t *c = (udp_conn_t *)m->release_data; + struct task *t = NULL; + bool free_conn; + + spin_lock_np(&c->outq_lock); + c->outq_len--; + free_conn = (c->outq_free && c->outq_len == 0); + if (!c->shutdown) + t = waitq_signal(&c->outq_wq, &c->outq_lock); + spin_unlock_np(&c->outq_lock); + waitq_signal_finish(t); + + net_tx_release_mbuf(m); + if (free_conn) + udp_release_conn(c); +} + +/** + * udp_write_to - writes to a UDP socket + * @c: the UDP socket + * @buf: a buffer from which to load the payload + * @len: the length of the payload + * @raddr: the remote address of the datagram (if not NULL) + * + * WARNING: This a blocking function. It will wait until space in the transmit + * buffer is available or the socket is shutdown. + * + * Returns the number of payload bytes sent in the datagram. If an error + * occurs, returns < 0 to indicate the error code. + */ +ssize_t udp_write_to(udp_conn_t *c, const void *buf, size_t len, const struct netaddr *raddr) +{ + struct netaddr addr; + ssize_t ret; + struct mbuf *m; + void *payload; + + if (len > UDP_MAX_PAYLOAD) + return -EMSGSIZE; + if (!raddr) { + if (c->e.match == TRANS_MATCH_3TUPLE) + return -EDESTADDRREQ; + addr = c->e.raddr; + } else { + addr = *raddr; + } + + spin_lock_np(&c->outq_lock); + + /* block until there is an actionable event */ + while (c->outq_len >= c->outq_cap && !c->shutdown) waitq_wait(&c->outq_wq, &c->outq_lock); + + /* is the socket shutdown? */ + if (c->shutdown) { + spin_unlock_np(&c->outq_lock); + return -EPIPE; + } + + c->outq_len++; + spin_unlock_np(&c->outq_lock); + + m = net_tx_alloc_mbuf(); + if (unlikely(!m)) + return -ENOBUFS; + + /* write datagram payload */ + payload = mbuf_put(m, len); + memcpy(payload, buf, len); + + /* override mbuf release method */ + m->release = udp_tx_release_mbuf; + m->release_data = (unsigned long)c; + + ret = udp_send_raw(m, len, c->e.laddr, addr); + if (unlikely(ret)) { + net_tx_release_mbuf(m); + return ret; + } + + return len; +} + +/** + * udp_read - reads from a UDP socket + * @c: the UDP socket + * @buf: a buffer to store the datagram + * @len: the size of @buf + * + * WARNING: This a blocking function. It will wait until a datagram is + * available, an error occurs, or the socket is shutdown. + * + * Returns the number of bytes in the datagram, or @len if the datagram + * is >= @len in size. If the socket has been shutdown, returns 0. + */ +ssize_t udp_read(udp_conn_t *c, void *buf, size_t len) +{ + return udp_read_from(c, buf, len, NULL); +} + +/** + * udp_write - writes to a UDP socket + * @c: the UDP socket + * @buf: the payload to send + * @len: the length of the payload + * + * WARNING: This a blocking function. 
It will wait until space in the transmit + * buffer is available or the socket is shutdown. + * + * Returns the number of payload bytes sent in the datagram. If an error + * occurs, returns < 0 to indicate the error code. + */ +ssize_t udp_write(udp_conn_t *c, const void *buf, size_t len) +{ + return udp_write_to(c, buf, len, NULL); +} + +void __udp_shutdown(udp_conn_t *c) +{ + spin_lock_np(&c->inq_lock); + spin_lock_np(&c->outq_lock); + BUG_ON(c->shutdown); + c->shutdown = true; + spin_unlock_np(&c->outq_lock); + spin_unlock_np(&c->inq_lock); + + /* prevent ingress receive and error dispatch (after RCU period) */ + trans_table_remove(&c->e); +} + +/** + * udp_shutdown - disables a UDP socket + * @c: the socket to disable + * + * All blocking requests on the socket will return a failure. + */ +void udp_shutdown(udp_conn_t *c) +{ + /* shutdown the UDP socket */ + __udp_shutdown(c); + + /* wake all blocked threads */ + if (!c->inq_err) + waitq_release(&c->inq_wq); + waitq_release(&c->outq_wq); +} + +/** + * udp_close - frees a UDP socket + * @c: the socket to free + * + * WARNING: Only the last reference can safely call this method. Call + * udp_shutdown() first if any threads are sleeping on the socket. + */ +void udp_close(udp_conn_t *c) +{ + bool free_conn; + + if (!c->shutdown) + __udp_shutdown(c); + + BUG_ON(!waitq_empty(&c->inq_wq)); + BUG_ON(!waitq_empty(&c->outq_wq)); + + /* free all in-flight mbufs */ + while (true) { + struct mbuf *m = mbufq_pop_head(&c->inq); + if (!m) + break; + mbuf_free(m); + } + + spin_lock_np(&c->outq_lock); + free_conn = c->outq_len == 0; + c->outq_free = true; + spin_unlock_np(&c->outq_lock); + + if (free_conn) + udp_release_conn(c); +} + +/* + * Parallel API + */ + +struct udp_spawner { + struct trans_entry e; + udpspawn_fn_t fn; +}; + +/* handles ingress packets with parallel threads */ +static void udp_par_recv(struct trans_entry *e, struct mbuf *m) +{ + udp_spawner_t *s = container_of(e, udp_spawner_t, e); + const struct ip_hdr *iphdr; + const struct udp_hdr *udphdr; + struct udp_spawn_data *d; + struct task *t; + + iphdr = mbuf_network_hdr(m, *iphdr); + udphdr = mbuf_pull_hdr_or_null(m, *udphdr); + if (unlikely(!udphdr)) { + mbuf_free(m); + return; + } + + t = task_create_with_buf((thread_fn_t)s->fn, (void **)&d, sizeof(*d)); + if (unlikely(!t)) { + mbuf_free(m); + return; + } + + d->buf = mbuf_data(m); + d->len = mbuf_length(m); + d->laddr = e->laddr; + d->raddr.ip = ntoh32(iphdr->saddr); + d->raddr.port = ntoh16(udphdr->src_port); + d->release_data = m; + + if (unlikely(task_enqueue(current_cpu_id(), t))) { + log_err("udp: failed to spawn udp packet receiver"); + mbuf_free(m); + task_free(t); + return; + } +} + +/* operations for UDP spawners */ +const struct trans_ops udp_par_ops = { + .recv = udp_par_recv, +}; + +/** + * udp_create_spawner - creates a UDP spawner for ingress datagrams + * @laddr: the local address to bind to + * @fn: a handler function for each datagram + * @s_out: if successful, set to a pointer to the spawner + * + * Returns 0 if successful, otherwise fail. 
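+ *
+ * Illustrative usage, assuming a handler with the shape implied by
+ * udp_par_recv() below (the port number is arbitrary):
+ *
+ *   static void echo_handler(struct udp_spawn_data *d)
+ *   {
+ *       udp_send(d->buf, d->len, d->laddr, d->raddr);
+ *       udp_spawn_data_release(d->release_data);
+ *   }
+ *
+ *   struct netaddr laddr = {.ip = 0, .port = 8000};
+ *   udp_spawner_t *s;
+ *   int ret = udp_create_spawner(laddr, echo_handler, &s);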
+ */ +int udp_create_spawner(struct netaddr laddr, udpspawn_fn_t fn, udp_spawner_t **s_out) +{ + udp_spawner_t *s; + int ret; + + /* only can support one local IP so far */ + if (laddr.ip == 0) + laddr.ip = io->addr; + else if (laddr.ip != io->addr) + return -EINVAL; + + s = smalloc(sizeof(*s)); + if (!s) + return -ENOMEM; + + trans_init_3tuple(&s->e, IPPROTO_UDP, &udp_par_ops, laddr); + s->fn = fn; + ret = trans_table_add(&s->e); + if (ret) { + sfree(s); + return ret; + } + + *s_out = s; + return 0; +} + +static void udp_release_spawner(struct rcu_head *h) +{ + udp_spawner_t *s = container_of(h, udp_spawner_t, e.rcu); + sfree(s); +} + +/** + * udp_destroy_spawner - unregisters and frees a UDP spawner + * @s: the spawner to free + */ +void udp_destroy_spawner(udp_spawner_t *s) +{ + trans_table_remove(&s->e); + rcu_free(&s->e.rcu, udp_release_spawner); +} + +/** + * udp_send - sends a UDP datagram + * @buf: the payload to send + * @len: the length of the payload + * @laddr: the local UDP address + * @raddr: the remote UDP address + * + * Returns the number of payload bytes sent in the datagram. If an error + * occurs, returns < 0 to indicate the error code. + */ +ssize_t udp_send(const void *buf, size_t len, struct netaddr laddr, struct netaddr raddr) +{ + void *payload; + struct mbuf *m; + int ret; + + if (len > UDP_MAX_PAYLOAD) + return -EMSGSIZE; + if (laddr.ip == 0) + laddr.ip = io->addr; + else if (laddr.ip != io->addr) + return -EINVAL; + if (laddr.port == 0) + return -EINVAL; + + m = net_tx_alloc_mbuf(); + if (unlikely(!m)) + return -ENOBUFS; + + /* write datagram payload */ + payload = mbuf_put(m, len); + memcpy(payload, buf, len); + + ret = udp_send_raw(m, len, laddr, raddr); + if (unlikely(ret)) { + mbuf_free(m); + return ret; + } + + return len; +} + +ssize_t udp_sendv(const struct iovec *iov, int iovcnt, struct netaddr laddr, struct netaddr raddr) +{ + struct mbuf *m; + int i, ret; + ssize_t len = 0; + + if (laddr.ip == 0) + laddr.ip = io->addr; + else if (laddr.ip != io->addr) + return -EINVAL; + if (laddr.port == 0) + return -EINVAL; + + m = net_tx_alloc_mbuf(); + if (unlikely(!m)) + return -ENOBUFS; + + /* write datagram payload */ + for (i = 0; i < iovcnt; i++) { + len += iov[i].iov_len; + if (unlikely(len > UDP_MAX_PAYLOAD)) { + mbuf_free(m); + return -EMSGSIZE; + } + memcpy(mbuf_put(m, iov[i].iov_len), iov[i].iov_base, iov[i].iov_len); + } + + ret = udp_send_raw(m, len, laddr, raddr); + if (unlikely(ret)) { + mbuf_free(m); + return ret; + } + + return len; +} + +/** + * udp_spawn_data_release - frees the datagram buffer for a spawner thread + * @release_data: the release data pointer + * + * Must be called when finished with the buffer passed to the spawner thread. + */ +void udp_spawn_data_release(void *release_data) +{ + struct mbuf *m = release_data; + mbuf_free(m); +} diff --git a/libos/net/waitq.h b/libos/net/waitq.h new file mode 100644 index 0000000..30cc430 --- /dev/null +++ b/libos/net/waitq.h @@ -0,0 +1,117 @@ +/* + * waitq.h - a light weight condition variable that works with locks instead of + * mutexes. 
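+ *
+ * Typical wait-side pattern (mirroring udp_read_from() in udp.c): take the
+ * lock, re-check the condition in a loop, and call waitq_wait(), which
+ * releases the lock while blocked and reacquires it before returning:
+ *
+ *   spin_lock_np(&lock);
+ *   while (!condition)
+ *       waitq_wait(&wq, &lock);
+ *   ... consume the condition ...
+ *   spin_unlock_np(&lock);
+ *
+ * The wake side signals while holding the lock and finishes the wakeup after
+ * dropping it, keeping the critical section short:
+ *
+ *   spin_lock_np(&lock);
+ *   ... establish the condition ...
+ *   struct task *t = waitq_signal(&wq, &lock);
+ *   spin_unlock_np(&lock);
+ *   waitq_signal_finish(t);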
+ */ + +#pragma once + +#include +#include +#include +#include +#include + +typedef struct waitq { + struct list_head waiters; +} waitq_t; + +/** + * waitq_wait - waits for the next signal + * @q: the wake queue + * @l: a held spinlock protecting the wake queue and the condition + */ +static inline void waitq_wait(waitq_t *q, spinlock_t *l) +{ + struct task *task = task_self(); + + assert_spin_lock_held(l); + list_add_tail(&q->waiters, &task->link); + task_block(l); + spin_lock_np(l); +} + +/** + * waitq_signal - wakes up to one waiter on the wake queue + * @q: the wake queue + * @l: a held spinlock protecting the wake queue and the condition + */ +static inline struct task *waitq_signal(waitq_t *q, spinlock_t *l) +{ + assert_spin_lock_held(l); + return list_pop(&q->waiters, struct task, link); +} + +/** + * waitq_signal_finish - finishes waking up to one waiter + * @th: the thread to wake (if non-NULL) + * + * Call this method after dropping the lock to reduce the size of the critical + * section. + */ +static inline void waitq_signal_finish(struct task *t) +{ + if (t) + task_wakeup(t); +} + +/** + * waitq_signal_locked - wakes up to one waiter on the wake queue + * @q: the wake queue + * @l: a held spinlock protecting the wake queue and the condition + */ +static inline void waitq_signal_locked(waitq_t *q, spinlock_t *l) +{ + struct task *t = waitq_signal(q, l); + waitq_signal_finish(t); +} + +/** + * waitq_release - wakes all pending waiters + * @q: the wake queue + * + * WARNING: the condition must have been updated with the lock held to + * prevent future waiters. However, this method can be called after the + * lock is released. + */ +static inline void waitq_release(waitq_t *q) +{ + while (true) { + struct task *t = list_pop(&q->waiters, struct task, link); + if (!t) + break; + task_wakeup(t); + } +} + +static inline void waitq_release_start(waitq_t *q, struct list_head *waiters) +{ + list_append_list(waiters, &q->waiters); +} + +static inline void waitq_release_finish(struct list_head *waiters) +{ + while (true) { + struct task *t = list_pop(waiters, struct task, link); + if (!t) + break; + task_wakeup(t); + } +} + +/** + * waitq_empty - returns true if there are no waiters + * @q: the wait queue to check + */ +static inline bool waitq_empty(waitq_t *q) +{ + return list_empty(&q->waiters); +} + +/** + * waitq_init - initializes a wake queue + * @q: the wake queue to initialize + */ +static inline void waitq_init(waitq_t *q) +{ + list_head_init(&q->waiters); +} diff --git a/libos/percpu.c b/libos/percpu.c new file mode 100644 index 0000000..99f109a --- /dev/null +++ b/libos/percpu.c @@ -0,0 +1,56 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +unsigned int thread_count; +__thread void *percpu_ptr; + +define_cpu_array(void *, percpu_offsets, USED_CPUS); + +extern const char __percpu_start[]; +extern const char __percpu_end[]; + +static int alloc_percpu_data(void) +{ + void *addr; + size_t len = __percpu_end - __percpu_start; + + /* no percpu data */ + if (!len) + return 0; + + addr = mem_map_anom(NULL, len, PGSIZE_2MB, current_numa_node()); + if (addr == MAP_FAILED) + return -ENOMEM; + + memset(addr, 0, len); + percpu_ptr = addr; + percpu_offsets[current_cpu_id()] = addr; + return 0; +} + +/** + * percpu_init - initializes a thread + * + * Returns 0 if successful, otherwise fail. 
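+ *
+ * Each scheduler thread maps a private copy of the [__percpu_start,
+ * __percpu_end) section on 2MB pages bound to its local NUMA node, then
+ * publishes the base address in percpu_offsets[cpu_id] so per-CPU variables
+ * can be located by CPU id.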
+ */ +int percpu_init(void) +{ + int ret; + + if ((ret = alloc_percpu_data()) < 0) + return ret; + + return 0; +} diff --git a/libos/platform/cpu.c b/libos/platform/cpu.c new file mode 100644 index 0000000..28abe44 --- /dev/null +++ b/libos/platform/cpu.c @@ -0,0 +1,105 @@ +#include +#include +#include + +#include +#include +#include +#include +#include + +define_cpu_array(int, hw_cpu_list, USED_CPUS); +define_cpu_array(int, sibling_cpu_map, MAX_CPUS); +define_cpu_array(int, cpu_siblings, USED_CPUS); + +static int parse_cpu_list_str(const char *str, int n, int *cpu_list) +{ + int i = 0; + int j = 0; + int k = 0; + int len = strlen(str); + char buf[10]; + + if (!n) + return -1; + + while (i < len) { + if (str[i] == ',') { + buf[j] = '\0'; + cpu_list[k++] = atoi(buf); + if (k == n) { + return 0; + } + j = 0; + } else { + buf[j++] = str[i]; + } + i++; + } + buf[j] = '\0'; + cpu_list[k++] = atoi(buf); + return 0; +} + +int bind_to_cpu(int tid, int cpu_id) +{ + int hw_cpu = hw_cpu_id(cpu_id); + cpu_set_t set; + CPU_ZERO(&set); + CPU_SET(hw_cpu, &set); + + int ret = sched_setaffinity(tid, sizeof(set), &set); + if (ret) { + log_warn("failed to bind to cpu %d(%d)", cpu_id, hw_cpu); + } + return ret; +} + +int unbind_cpus(int tid) +{ + cpu_set_t set; + CPU_ZERO(&set); + for (int i = 0; i < MAX_CPUS; i++) { + CPU_SET(i, &set); + } + for (int i = 0; i < USED_CPUS; i++) { + CPU_CLR(hw_cpu_id(i), &set); + } + + int ret = sched_setaffinity(tid, sizeof(set), &set); + if (ret) { + log_warn("failed to unbind cpus"); + } + return ret; +} + +int platform_cpu_init() +{ + // TODO: parse /sys/devices/system/cpu/cpu*/topology/thread_siblings_list + + int ret; + if ((ret = parse_cpu_list_str(USED_HW_CPU_LIST, USED_CPUS, hw_cpu_list))) { + log_err("failed to parse hw cpu list"); + return ret; + } + + if ((ret = parse_cpu_list_str(SIBLING_CPU_MAP, MAX_CPUS, sibling_cpu_map))) { + log_err("failed to parse sibling cpu map"); + return ret; + } + + for (int i = 0; i < USED_CPUS; i++) { + int hw_cpu = hw_cpu_list[i]; + int sibling = sibling_cpu_map[hw_cpu]; + cpu_siblings[i] = -1; + for (int j = 0; j < USED_CPUS; j++) { + if (hw_cpu_list[j] == sibling) { + cpu_siblings[i] = j; + break; + } + } + // BUG_ON(cpu_siblings[i] == -1); + } + + return 0; +} diff --git a/libos/platform/dev.c b/libos/platform/dev.c new file mode 100644 index 0000000..7c3dc0e --- /dev/null +++ b/libos/platform/dev.c @@ -0,0 +1,97 @@ +#include +#include +#ifdef SKYLOFT_SIGNAL_SWITCH +#include +#include +#include +#endif + +#include +#include +#include + +#ifdef SKYLOFT_SIGNAL_SWITCH +static sigset_t signal_set; +#else +static int dev_fd; +#endif + +#ifdef SKYLOFT_SIGNAL_SWITCH +static inline int wait_for_signal() +{ + int sig; + int ret = sigwait(&signal_set, &sig); + if (ret) { + log_warn("sigwait failed"); + } + return ret; +} +#endif + +int platform_dev_init() +{ +#ifdef SKYLOFT_SIGNAL_SWITCH + log_info("platform: switching mode: signal"); + sigemptyset(&signal_set); + sigaddset(&signal_set, SIGUSR1); + return 0; +#else + log_info("platform: switching mode: kmod"); + dev_fd = open(SKYLOFT_DEV_PATH, O_RDWR | O_SYNC); + return dev_fd; +#endif +} + +int skyloft_park_on_cpu(int cpu) +{ +#ifdef SKYLOFT_SIGNAL_SWITCH + int ret = 0; + if (cpu >= 0) { + ret = bind_to_cpu(0, cpu); + } + wait_for_signal(); + return ret; +#else + return ioctl(dev_fd, SKYLOFT_IO_PARK, cpu >= 0 ? 
hw_cpu_id(cpu) : -1); +#endif +} + +int skyloft_wakeup(pid_t target_tid) +{ +#ifdef SKYLOFT_SIGNAL_SWITCH + int ret = kill(target_tid, SIGUSR1); + if (ret) { + errno = -ESRCH; + } + return ret; +#else + return ioctl(dev_fd, SKYLOFT_IO_WAKEUP, target_tid); +#endif +} + +int skyloft_switch_to(pid_t target_tid) +{ +#ifdef SKYLOFT_SIGNAL_SWITCH + int ret = kill(target_tid, SIGUSR1); + if (ret) { + log_warn("send signal to thread %d failed: %d %d\n", target_tid, ret, errno); + } else { + wait_for_signal(); + } + return ret; +#else + return ioctl(dev_fd, SKYLOFT_IO_SWITCH_TO, target_tid); +#endif +} + +#ifdef SKYLOFT_UINTR +int skyloft_setup_device_uintr(int flags) +{ + return ioctl(dev_fd, SKYLOFT_IO_SETUP_DEVICE_UINTR, flags); +} + +int skyloft_timer_set_hz(int hz) +{ + return ioctl(dev_fd, SKYLOFT_IO_TIMER_SET_HZ, hz); +} +#endif diff --git a/libos/platform/mem.c b/libos/platform/mem.c new file mode 100644 index 0000000..63434ba --- /dev/null +++ b/libos/platform/mem.c @@ -0,0 +1,313 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +long mbind(void *start, size_t len, int mode, const unsigned long *nmask, unsigned long maxnode, + unsigned flags) +{ + return syscall(__NR_mbind, start, len, mode, nmask, maxnode, flags); +} + +static void sigbus_error(int sig) { panic("couldn't map pages"); } + +void touch_mapping(void *base, size_t len, size_t pgsize) +{ + sighandler_t s; + char *pos; + + /* + * Unfortunately mmap() provides no error message if MAP_POPULATE fails + * because of insufficient memory. Therefore, we manually force a write + * on each page to make sure the mapping was successful. + */ + s = signal(SIGBUS, sigbus_error); + for (pos = (char *)base; pos < (char *)base + len; pos += pgsize) ACCESS_ONCE(*pos); + signal(SIGBUS, s); +} + +static void *__mem_map_common(void *base, size_t len, size_t pgsize, int flags, int fd, + unsigned long *mask, int numa_policy) +{ + void *addr; + + flags |= MAP_POPULATE; + if (fd == -1) + flags |= MAP_ANONYMOUS; + if (base) + flags |= MAP_FIXED; + + len = align_up(len, pgsize); + + switch (pgsize) { + case PGSIZE_4KB: + break; + case PGSIZE_2MB: + flags |= MAP_HUGETLB; +#ifdef MAP_HUGE_2MB + flags |= MAP_HUGE_2MB; +#endif + break; + case PGSIZE_1GB: +#ifdef MAP_HUGE_1GB + flags |= MAP_HUGETLB | MAP_HUGE_1GB; +#else + return MAP_FAILED; +#endif + break; + default: /* fail on other sizes */ + return MAP_FAILED; + } + + addr = mmap(base, len, PROT_READ | PROT_WRITE, flags, fd, 0); + if (addr == MAP_FAILED) { + log_err("failed %s", strerror(errno)); + return MAP_FAILED; + } + + BUILD_ASSERT(sizeof(unsigned long) * 8 >= MAX_NUMA); + if (mbind(addr, len, numa_policy, mask ? mask : NULL, mask ? 
MAX_NUMA + 1 : 0, + MPOL_MF_STRICT | MPOL_MF_MOVE)) { + log_err("failed %s", strerror(errno)); + goto fail; + } + + touch_mapping(addr, len, pgsize); + return addr; + +fail: + munmap(addr, len); + return MAP_FAILED; +} + +/** + * mem_map_anom - map anonymous memory pages + * @base: the base address (or NULL for automatic) + * @len: the length of the mapping + * @pgsize: the page size + * @node: the NUMA node + * + * Returns the base address, or MAP_FAILED if out of memory + */ +void *mem_map_anom(void *base, size_t len, size_t pgsize, int node) +{ + unsigned long mask = (1 << node); + return __mem_map_common(base, len, pgsize, MAP_PRIVATE, -1, &mask, MPOL_BIND); +} + +/** + * mem_map_shm - maps a System V shared memory segment backed with a file + * @path: the file path to the shared memory backing file + * @base: the base address to map the shared segment (or automatic if NULL) + * @len: the length of the mapping + * @pgsize: the size of each page + * @node: the NUMA node + * + * Returns a pointer to the mapping, or NULL if the mapping failed. + */ +void *mem_map_shm_file(const char *path, void *base, size_t len, size_t pgsize, int node) +{ + int fd = open(path, O_CREAT | O_RDWR, 0666); + if (fd < 0) { + return MAP_FAILED; + } + + len = align_up(len, pgsize); + if (ftruncate(fd, len) < 0) { + return MAP_FAILED; + } + + unsigned long mask = (1 << node); + return __mem_map_common(base, len, pgsize, MAP_SHARED, fd, &mask, MPOL_BIND); +} + +/** + * mem_map_shm - maps a System V shared memory segment + * @key: the unique key that identifies the shared region (e.g. use ftok()) + * @base: the base address to map the shared segment (or automatic if NULL) + * @len: the length of the mapping + * @pgsize: the size of each page + * @exclusive: ensure this call creates the shared segment + * + * Returns a pointer to the mapping, or NULL if the mapping failed. + */ +void *mem_map_shm(mem_key_t key, void *base, size_t len, size_t pgsize, bool exclusive) +{ + void *addr; + int shmid, flags = IPC_CREAT | 0777; + + BUILD_ASSERT(sizeof(mem_key_t) == sizeof(key_t)); + + switch (pgsize) { + case PGSIZE_4KB: + break; + case PGSIZE_2MB: + flags |= SHM_HUGETLB; +#ifdef SHM_HUGE_2MB + flags |= SHM_HUGE_2MB; +#endif + break; + case PGSIZE_1GB: +#ifdef SHM_HUGE_1GB + flags |= SHM_HUGETLB | SHM_HUGE_1GB; +#else + return MAP_FAILED; +#endif + break; + default: /* fail on other sizes */ + return MAP_FAILED; + } + + if (exclusive) + flags |= IPC_EXCL; + + shmid = shmget(key, len, flags); + if (shmid == -1) + return MAP_FAILED; + + addr = shmat(shmid, base, 0); + if (addr == MAP_FAILED) + return MAP_FAILED; + + touch_mapping(addr, len, pgsize); + return addr; +} + +/** + * mem_unmap_shm - detach a shared memory mapping + * @addr: the base address of the mapping + * + * Returns 0 if successful, otherwise fail. + */ +int mem_unmap_shm(void *addr) +{ + if (shmdt(addr) == -1) + return -errno; + return 0; +} + +#define PAGEMAP_PGN_MASK 0x7fffffffffffffULL +#define PAGEMAP_FLAG_PRESENT (1ULL << 63) +#define PAGEMAP_FLAG_SWAPPED (1ULL << 62) +#define PAGEMAP_FLAG_FILE (1ULL << 61) +#define PAGEMAP_FLAG_SOFTDIRTY (1ULL << 55) + +/** + * mem_lookup_page_phys_addrs - determines the physical address of pages + * @addr: a pointer to the start of the pages (must be @size aligned) + * @len: the length of the mapping + * @pgsize: the page size (4KB, 2MB, or 1GB) + * @paddrs: a pointer store the physical addresses (of @nr elements) + * + * Returns 0 if successful, otherwise failure. 
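+ *
+ * Here @nr equals @len / @pgsize: one physical address is recorded per page,
+ * taken from the /proc/self/pagemap entry of the first 4KB page of each
+ * @pgsize-sized page.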
+ */ +int mem_lookup_page_phys_addrs(void *addr, size_t len, size_t pgsize, physaddr_t *paddrs) +{ + uintptr_t pos; + uint64_t tmp; + int fd, i = 0, ret = 0; + + /* + * 4 KB pages could be swapped out by the kernel, so it is not + * safe to get a machine address. If we later decide to support + * 4KB pages, then we need to mlock() the page first. + */ + if (pgsize == PGSIZE_4KB) + return -EINVAL; + + fd = open("/proc/self/pagemap", O_RDONLY); + if (fd < 0) { + log_warn("%s(): cannot open /proc/self/pagemap: %s", __func__, strerror(errno)); + return -EIO; + } + + for (pos = (uintptr_t)addr; pos < (uintptr_t)addr + len; pos += pgsize) { + if (lseek(fd, pos / PGSIZE_4KB * sizeof(uint64_t), SEEK_SET) == (off_t)-1) { + log_warn("%s(): seek error in /proc/self/pagemap: %s", __func__, strerror(errno)); + ret = -EIO; + goto out; + } + if (read(fd, &tmp, sizeof(uint64_t)) <= 0) { + log_warn("%s(): cannot read /proc/self/pagemap: %s", __func__, strerror(errno)); + ret = -EIO; + goto out; + } + if (!(tmp & PAGEMAP_FLAG_PRESENT)) { + ret = -ENODEV; + goto out; + } + + paddrs[i++] = (tmp & PAGEMAP_PGN_MASK) * PGSIZE_4KB; + } + +out: + close(fd); + return ret; +} + +physaddr_t mem_virt2phys(void *addr) +{ + physaddr_t pa; + int fd, ret; + int page_size; + uint64_t virt_pfn; + off_t offset; + uint64_t page; + char pagemap[32]; + + page_size = getpagesize(); + + sprintf(pagemap, "/proc/self/pagemap"); + fd = open(pagemap, O_RDONLY); + if (fd < 0) { + log_warn("%s(): cannot open %s: %s", __func__, pagemap, strerror(errno)); + return -1; + } + + virt_pfn = (uint64_t)addr / page_size; + offset = sizeof(uint64_t) * virt_pfn; + if (lseek(fd, offset, SEEK_SET) == (off_t)-1) { + log_warn("%s(): seek error in %s: %s", __func__, pagemap, strerror(errno)); + close(fd); + return -1; + } + + ret = read(fd, &page, sizeof(uint64_t)); + close(fd); + if (ret < 0) { + log_warn("%s(): cannot read %s: %s", __func__, pagemap, strerror(errno)); + close(fd); + return -1; + } else if (ret != sizeof(uint64_t)) { + log_warn("%s(): read %d bytes from %s " + "but expected %lx", + __func__, ret, pagemap, sizeof(uint64_t)); + return -1; + } + + log_info("%s %lx", __func__, page); + /* + * the pfn (page frame number) are bits 0-54 (see + * pagemap.txt in linux Documentation) + */ + if ((page & PAGEMAP_PGN_MASK) == 0) + return -1; + + pa = ((page & PAGEMAP_PGN_MASK) * page_size) + ((unsigned long)addr % page_size); + + return pa; +} \ No newline at end of file diff --git a/libos/platform/mod.c b/libos/platform/mod.c new file mode 100644 index 0000000..52281a7 --- /dev/null +++ b/libos/platform/mod.c @@ -0,0 +1,33 @@ +#include + +extern int platform_cpu_init(); +extern int platform_dev_init(); +#ifdef SKYLOFT_UINTR +extern int platform_uintr_init_percpu(); +#endif + +int platform_init() +{ + if (platform_cpu_init() < 0) { + log_err("platform_cpu_init failed"); + return -1; + } + + if (platform_dev_init() < 0) { + log_err("platform_dev_init failed"); + return -1; + } + + return 0; +} + +int platform_init_percpu() +{ +#if defined(SKYLOFT_UINTR) && !defined(SKYLOFT_SCHED_SQ) && !defined(SKYLOFT_SCHED_SQ_LCBE) + if (platform_uintr_init_percpu() < 0) { + log_err("platform_uintr_init_percpu failed"); + return -1; + } +#endif + return 0; +} diff --git a/libos/platform/uintr.c b/libos/platform/uintr.c new file mode 100644 index 0000000..f10a44e --- /dev/null +++ b/libos/platform/uintr.c @@ -0,0 +1,52 @@ +#ifdef SKYLOFT_UINTR + +#include +#include +#include +#include +#include + +extern void uintr_handler(); + +__thread int g_uintr_index; + +int 
platform_uintr_init_percpu(void) +{ +#ifdef SKYLOFT_DPDK + if (sl_current_cpu_id() == IO_CPU) + return 0; +#endif + + int ret = uintr_register_handler(uintr_handler, 0); + if (ret < 0) { + log_err("uintr: failed to register a handler %d", ret); + return -1; + } + + g_uintr_index = uintr_register_self(UVEC, 0); + if (g_uintr_index < 0) { + log_err("uintr: failed to register the sender for self"); + return -1; + } + + ret = skyloft_setup_device_uintr(1); + if (ret < 0) { + log_err("uintr: failed to map device interrupts to userspace"); + return -1; + } + + ret = skyloft_timer_set_hz(TIMER_HZ); + if (ret < 0) { + log_err("uintr: failed to set timer frequency"); + return -1; + } + + local_irq_disable(); + _senduipi(g_uintr_index); + + log_info("uintr: CPU %d registered uintr index %d", sl_current_cpu_id(), g_uintr_index); + + return 0; +} + +#endif diff --git a/libos/sched/policy/cfs.c b/libos/sched/policy/cfs.c new file mode 100644 index 0000000..4a3af09 --- /dev/null +++ b/libos/sched/policy/cfs.c @@ -0,0 +1,480 @@ +/* + * cfs.c: CFS-like scheduler + */ + +#include +#include +#include +#include + +#include + +/* + * Targeted preemption latency for CPU-bound tasks: + * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) + */ +__nsec sysctl_sched_latency = 50000ULL; +/* + * Minimal preemption granularity for CPU-bound tasks: + * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ +__nsec sysctl_sched_min_granularity = 12500ULL; +/* + * is kept at sysctl_sched_latency / sysctl_sched_min_granularity + */ +static unsigned int sched_nr_latency = 4; + +DEFINE_PERCPU(struct cfs_rq *, percpu_rqs); + +/* global counter for next CPU */ +static atomic_int TARGET_CPU = 0; +extern __thread int g_logic_cpu_id; + +static void __update_inv_weight(struct load_weight *lw) +{ + unsigned long w; + + if (likely(lw->inv_weight)) + return; + + w = lw->weight; + + if (unlikely(w >= WMULT_CONST)) + lw->inv_weight = 1; + else if (unlikely(!w)) + lw->inv_weight = WMULT_CONST; + else + lw->inv_weight = WMULT_CONST / w; +} + +static inline void update_load_add(struct load_weight *lw, unsigned long inc) +{ + lw->weight += inc; + lw->inv_weight = 0; +} + +static inline void update_load_sub(struct load_weight *lw, unsigned long dec) +{ + lw->weight -= dec; + lw->inv_weight = 0; +} + +static inline void update_load_set(struct load_weight *lw, unsigned long w) +{ + lw->weight = w; + lw->inv_weight = 0; +} + +static uint64_t __calc_delta(uint64_t delta_exec, unsigned long weight, struct load_weight *lw) +{ + uint64_t fact = weight; + int shift = WMULT_SHIFT; + + __update_inv_weight(lw); + + if (unlikely(fact >> 32)) { + while (fact >> 32) { + fact >>= 1; + shift--; + } + } + + /* hint to use a 32x32->64 mul */ + fact = (uint64_t)(uint32_t)fact * lw->inv_weight; + + while (fact >> 32) { + fact >>= 1; + shift--; + } + + return mul_u64_u32_shr(delta_exec, fact, shift); +} + +static inline uint64_t calc_delta_fair(uint64_t delta, struct cfs_task *task) +{ + if (unlikely(task->load.weight != NICE_0_LOAD)) + delta = __calc_delta(delta, NICE_0_LOAD, &task->load); + + return delta; +} + +static inline uint64_t max_vruntime(uint64_t max_vruntime, uint64_t vruntime) +{ + int64_t delta = (int64_t)(vruntime - max_vruntime); + if (delta > 0) + max_vruntime = vruntime; + + return max_vruntime; +} + +static inline uint64_t min_vruntime(uint64_t min_vruntime, uint64_t vruntime) +{ + int64_t delta = (int64_t)(vruntime - min_vruntime); + if (delta < 0) + min_vruntime = vruntime; + + return min_vruntime; +} + +static void 
update_min_vruntime(struct cfs_rq *cfs_rq) +{ + struct cfs_task *curr = cfs_rq->curr, *task; + struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline); + uint64_t vruntime = cfs_rq->min_vruntime; + + if (curr) { + if (curr->on_rq) + vruntime = curr->vruntime; + else + curr = NULL; + } + + if (leftmost) { /* non-empty tree */ + task = rb_entry(leftmost, struct cfs_task, run_node); + + if (!curr) + vruntime = task->vruntime; + else + vruntime = min_vruntime(vruntime, task->vruntime); + } + + /* ensure we never gain time by being placed backwards. */ + cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); +} + +static inline void update_curr(struct cfs_rq *cfs_rq) +{ + struct cfs_task *curr = cfs_rq->curr; + __nsec now = now_ns(); + __nsec delta_exec; + + if (unlikely(!curr)) + return; + + delta_exec = now - curr->exec_start; + if (unlikely((int64_t)delta_exec <= 0)) + return; + + curr->exec_start = now; + curr->sum_exec_runtime += delta_exec; + curr->vruntime += calc_delta_fair(delta_exec, curr); + update_min_vruntime(cfs_rq); +} + +/* + * The idea is to set a period in which each task runs once. + * + * When there are too many tasks (sched_nr_latency) we have to stretch + * this period because otherwise the slices get too small. + * + * p = (nr <= nl) ? l : l * nr / nl + */ +static uint64_t __sched_period(unsigned long nr_running) +{ + if (unlikely(nr_running > sched_nr_latency)) + return nr_running * sysctl_sched_min_granularity; + else + return sysctl_sched_latency; +} + +/* + * We calculate the wall-time slice from the period by taking a part + * proportional to the weight. + * + * s = p * wi / sum(wi) + */ +static uint64_t sched_slice(struct cfs_rq *cfs_rq, struct cfs_task *task) +{ + uint64_t slice; + struct load_weight *load; + struct load_weight lw; + + slice = __sched_period(cfs_rq->nr_running + !task->on_rq); + + load = &cfs_rq->load; + if (unlikely(!task->on_rq)) { + lw = cfs_rq->load; + update_load_add(&lw, task->load.weight); + load = &lw; + } + + return __calc_delta(slice, task->load.weight, load); +} + +/* + * We calculate the vruntime slice of a to-be-inserted task. + * + * vs = s * NICE_0 / w + */ +static uint64_t sched_vslice(struct cfs_rq *cfs_rq, struct cfs_task *task) +{ + return calc_delta_fair(sched_slice(cfs_rq, task), task); +} + +static void place_task(struct cfs_rq *cfs_rq, struct cfs_task *task, bool init) +{ + uint64_t vruntime = cfs_rq->min_vruntime; + + /* + * The 'current' period is already promised to the current tasks, + * however the extra weight of the new task will slow them down a + * little, place the new task so that it fits in the slot that + * stays open at the end. + */ + if (init) + vruntime += sched_vslice(cfs_rq, task); + + /* sleeps up to a single latency don't count. */ + if (!init) + /* + * Halve their sleep time's effect, to allow + * for a gentler effect of sleepers: + */ + vruntime -= sysctl_sched_latency >> 1; + + /* ensure we never gain time by being placed backwards. 
*/ + task->vruntime = max_vruntime(task->vruntime, vruntime); +} + +static inline struct cfs_task *__pick_first_task(struct cfs_rq *cfs_rq) +{ + return rb_entry(rb_first_cached(&cfs_rq->tasks_timeline), struct cfs_task, run_node); +} + +static inline void __dequeue_task(struct cfs_rq *cfs_rq, struct cfs_task *task) +{ + rb_erase_cached(&task->run_node, &cfs_rq->tasks_timeline); +} + +static inline bool __vruntime_lt(struct rb_node *a, const struct rb_node *b) +{ + return (int64_t)(rb_entry(a, struct cfs_task, run_node)->vruntime - + rb_entry(b, struct cfs_task, run_node)->vruntime) < 0; +} + +static inline void __enqueue_task(struct cfs_rq *cfs_rq, struct cfs_task *task) +{ + rb_add_cached(&task->run_node, &cfs_rq->tasks_timeline, &__vruntime_lt); +} + +static inline void enqueue_update(struct cfs_rq *cfs_rq, struct cfs_task *task) +{ + update_load_add(&cfs_rq->load, task->load.weight); + cfs_rq->nr_running++; +} + +static void enqueue_task(struct cfs_rq *cfs_rq, struct cfs_task *task, bool wakeup) +{ + bool curr = (task == cfs_rq->curr); + assert_spin_lock_held(&cfs_rq->lock); + + if (!wakeup && curr) + task->vruntime += cfs_rq->min_vruntime; + update_curr(cfs_rq); + enqueue_update(cfs_rq, task); + + if (!wakeup && !curr) + task->vruntime += cfs_rq->min_vruntime; + + /* compensate before waking up task */ + if (wakeup) + place_task(cfs_rq, task, false); + + if (!curr) + __enqueue_task(cfs_rq, task); + + task->on_rq = true; + log_debug("%s: rq=%p task=%p curr=%p nr=%d", __func__, cfs_rq, task, cfs_rq->curr, + cfs_rq->nr_running); +} + +static inline void dequeue_update(struct cfs_rq *cfs_rq, struct cfs_task *task) +{ + update_load_sub(&cfs_rq->load, task->load.weight); + cfs_rq->nr_running--; +} + +static void dequeue_task(struct cfs_rq *cfs_rq, struct cfs_task *task, bool sleep) +{ + assert_spin_lock_held(&cfs_rq->lock); + + update_curr(cfs_rq); + dequeue_update(cfs_rq, task); + + if (task != cfs_rq->curr) + __dequeue_task(cfs_rq, task); + task->on_rq = false; + + /* normalize the entity after updating the min_vruntime */ + if (!sleep) + task->vruntime -= cfs_rq->min_vruntime; + + update_min_vruntime(cfs_rq); + log_debug("%s: %p %p %d", __func__, task, cfs_rq->curr, cfs_rq->nr_running); +} + +int cfs_sched_init_task(struct task *t) +{ + struct cfs_task *task = cfs_task_of(t); + memset(task, 0, sizeof(struct cfs_task)); + /* guarantee task always has weight */ + update_load_set(&task->load, NICE_0_LOAD); + t->allow_preempt = true; + return 0; +} + +void cfs_sched_finish_task(struct task *t) +{ + struct cfs_rq *cfs_rq = this_rq(); + struct cfs_task *task = cfs_task_of(t); + + assert_local_irq_disabled(); + + spin_lock(&cfs_rq->lock); + dequeue_task(cfs_rq, task, false); + cfs_rq->curr = NULL; /* never touch it again */ + spin_unlock(&cfs_rq->lock); +} + +static inline void __put_task(struct cfs_rq *cfs_rq, struct cfs_task *task) +{ + spin_lock(&cfs_rq->lock); + if (!task->on_rq) + enqueue_task(cfs_rq, task, false); + spin_unlock(&cfs_rq->lock); +} + +static void __fork_task(struct cfs_rq *cfs_rq, struct cfs_task *task) +{ + struct cfs_task *curr = cfs_rq->curr; + update_curr(cfs_rq); + if (curr) + task->vruntime = curr->vruntime; + place_task(cfs_rq, task, true); + task->vruntime -= cfs_rq->min_vruntime; +} + +static inline int find_target_cpu(struct cfs_task *task, bool new_task) +{ + if (new_task) { + return atomic_fetch_add(&TARGET_CPU, 1) % USED_CPUS; + } else { + return task->last_run; + } +} + +int cfs_sched_spawn(struct task *t, int cpu) +{ + struct cfs_task *task = cfs_task_of(t); + 
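+    /*
+     * New tasks are placed round-robin across all CPUs by find_target_cpu()
+     * (a global atomic counter modulo USED_CPUS); the @cpu argument is not
+     * used for placement here. Woken tasks instead return to their last_run
+     * CPU.
+     */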
struct cfs_rq *cfs_rq = cpu_rq(find_target_cpu(task, true)); + + __fork_task(cfs_rq, task); + __put_task(cfs_rq, task); + + return 0; +} + +static void __set_next_task(struct cfs_rq *cfs_rq, struct cfs_task *task) +{ + if (task->on_rq) + __dequeue_task(cfs_rq, task); + task->exec_start = now_ns(); + task->last_run = g_logic_cpu_id; + task->prev_sum_exec_runtime = task->sum_exec_runtime; + cfs_rq->curr = task; +} + +struct task *cfs_sched_pick_next() +{ + struct cfs_rq *rq = this_rq(); + struct cfs_task *task; + + assert_spin_lock_held(&rq->lock); + + if (!rq->nr_running) + return NULL; + + task = __pick_first_task(rq); + __set_next_task(rq, task); + + return task_of(task); +} + +void cfs_sched_yield() +{ + struct cfs_rq *rq = this_rq(); + struct cfs_task *prev = rq->curr; + assert(this_rq()->curr == cfs_task_of(task_self())); + + spin_lock(&rq->lock); + if (prev->on_rq) { + update_curr(rq); + __enqueue_task(rq, prev); + } + rq->curr = NULL; + spin_unlock(&rq->lock); +} + +void cfs_sched_wakeup(struct task *t) +{ + struct cfs_task *task = cfs_task_of(t); + struct cfs_rq *rq = cpu_rq(find_target_cpu(task, false)); + + spin_lock(&rq->lock); + if (!task->on_rq) + enqueue_task(rq, task, true); + spin_unlock(&rq->lock); +} + +void cfs_sched_block() +{ + struct cfs_rq *rq = this_rq(); + + assert(rq->curr->on_rq); + + spin_lock(&rq->lock); + dequeue_task(rq, rq->curr, true); + rq->curr = NULL; + spin_unlock(&rq->lock); +} + +static bool check_preempt_tick(struct cfs_rq *cfs_rq, struct cfs_task *curr) +{ + struct cfs_task *first; + __nsec delta_exec, ideal_runtime; + int64_t delta; + + ideal_runtime = sched_slice(cfs_rq, curr); + delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; + if (delta_exec > ideal_runtime) + return true; + if (delta_exec < sysctl_sched_min_granularity) + return false; + + first = __pick_first_task(cfs_rq); + delta = curr->vruntime - first->vruntime; + if (delta < 0) + return false; + if ((uint64_t)delta > ideal_runtime) + return true; + + return false; +} + +bool cfs_sched_preempt() +{ + bool resched = false; + struct cfs_rq *cfs_rq = this_rq(); + + assert_local_irq_disabled(); + + spin_lock(&cfs_rq->lock); + update_curr(cfs_rq); + if (cfs_rq->nr_running > 1) + resched = check_preempt_tick(cfs_rq, cfs_rq->curr); + spin_unlock(&cfs_rq->lock); + + log_debug("%s return %d", __func__, resched); + + return resched; +} diff --git a/libos/sched/policy/fifo.c b/libos/sched/policy/fifo.c new file mode 100644 index 0000000..83e917f --- /dev/null +++ b/libos/sched/policy/fifo.c @@ -0,0 +1,132 @@ +/* + * fifo.c: a fifo scheduler implemented with fixed runqueue and load balance + */ + +#include +#include +#include +#include + +#include +#include +#include + +__thread struct fifo_rq *this_rq; +struct fifo_rq *rqs[USED_CPUS]; + +int fifo_sched_init_percpu(void *percpu_data) +{ + struct fifo_rq *rq = (struct fifo_rq *)percpu_data; + + // TODO: multiple apps + if (current_app_id() == 0) { + rqs[current_cpu_id()] = this_rq = rq; + spin_lock_init(&rq->lock); + rq->k = thisk(); + rq->head = rq->tail = 0; + list_head_init(&rq->overflow); + } + + return 0; +} + +static void put_task(struct fifo_rq *rq, struct task *task) +{ + uint32_t rq_head; + int flags; + + assert(task != NULL); + + local_irq_save(flags); + rq_head = atomic_load_acq(&rq->head); + if (unlikely(rq->tail - rq_head >= RUNTIME_RQ_SIZE)) { + spin_lock(&rq->lock); + list_add_tail(&rq->overflow, &task->link); + spin_unlock(&rq->lock); + local_irq_restore(flags); + return; + } + RQ_TAIL(rq) = task; + 
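+    /*
+     * Publish the task slot before advancing the tail: this release store
+     * pairs with the acquire load of rq->tail in steal_task(), so a thief
+     * never observes the new tail before the task pointer is visible.
+     */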
atomic_store_rel(&rq->tail, rq->tail + 1); + local_irq_restore(flags); +} + +int fifo_sched_spawn(struct task *task, int cpu) +{ + if (task == NULL || cpu < 0 || cpu > USED_CPUS) + return -1; + put_task(this_rq(), task); + return 0; +} + +void fifo_sched_yield() { put_task(this_rq(), task_self()); } + +void fifo_sched_wakeup(struct task *task) { put_task(this_rq(), task); } + +static bool steal_task(struct fifo_rq *l, struct fifo_rq *r) +{ + struct task *task = NULL; + uint32_t avail, rq_head, i; + + if (!spin_try_lock(&r->lock)) + return false; + + /* try to steal directly from the runqueue */ + avail = atomic_load_acq(&r->tail) - r->head; + if (avail) { + /* steal half the tasks */ + avail = div_up(avail, 2); + rq_head = r->head; + for (i = 0; i < avail; i++) { + l->tasks[i] = r->tasks[rq_head++ & RQ_SIZE_MASK]; + } + atomic_store_rel(&r->head, rq_head); + spin_unlock(&r->lock); + l->tail = avail; + ADD_STAT(TASKS_STOLEN, avail); + return true; + } + + /* check for overflow tasks */ + task = list_pop(&r->overflow, struct task, link); + if (task) + goto done; + + /* check for softirqs */ + task = softirq_task(r->k, SOFTIRQ_MAX_BUDGET); + if (task) + goto done; + +done: + if (task) { + l->tasks[l->tail++] = task; + ADD_STAT(TASKS_STOLEN, 1); + } + spin_unlock(&r->lock); + return task != NULL; +} + +void fifo_sched_balance() +{ + int cpu = current_cpu_id(); + struct fifo_rq *l = cpu_rq(cpu); + int i, sibling, idx; + uint32_t start_idx; + + assert_spin_lock_held(&l->lock); + assert(RQ_IS_EMPTY(l)); + + l->head = l->tail = 0; + + // sibling = cpu_sibling(cpu); + // if (sibling >= 0 && steal_task(l, cpu_rq(sibling))) + // return; + + /* try to steal from every kthread */ + start_idx = rand_crc32c((uintptr_t)l); + for (i = 0; i < USED_CPUS - 1; i++) { + idx = (start_idx + i) % (USED_CPUS - 1); + if (idx != cpu && steal_task(l, cpu_rq(idx))) + return; + } +} diff --git a/libos/sched/policy/rr.c b/libos/sched/policy/rr.c new file mode 100644 index 0000000..ac9cc3b --- /dev/null +++ b/libos/sched/policy/rr.c @@ -0,0 +1,101 @@ +/* + * rr.c: a round-robin scheduler + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +DEFINE_PERCPU(struct fifo_rq *, rqs); + +static atomic_int TARGET_CPU = 0; + +static int find_target_cpu(struct task *task, bool new_task) +{ + if (new_task) { + int cpu = atomic_fetch_add(&TARGET_CPU, 1) % USED_CPUS; + return cpu; + } else { + return fifo_task_of(task)->last_run; + } +} + +static void put_task(struct fifo_rq *rq, struct task *task) +{ + unsigned int tail = atomic_fetch_add_explicit(&rq->tail, 1, memory_order_acquire); + if (tail >= atomic_load_acq(&rq->head) + RUNTIME_RQ_SIZE) { + panic("runqueue full"); + } + atomic_store_rel(&rq->tasks[tail & RQ_SIZE_MASK], task); +} + +struct task *fifo_sched_pick_next() +{ + struct fifo_rq *rq = this_rq(); + unsigned int head = atomic_load_relax(&rq->head); + struct task *task = (struct task *)atomic_exchange_explicit( + (atomic_ullong *)&rq->tasks[head & RQ_SIZE_MASK], NULL, memory_order_acquire); + if (!task) { + goto done; + } + atomic_store_rel(&rq->head, head + 1); + fifo_task_of(task)->last_run = current_cpu_id(); + +done: + return task; +} + +int fifo_sched_spawn(struct task *task, int cpu) +{ + if (task == NULL || cpu < 0 || cpu > USED_CPUS) + return -1; + fifo_task_of(task)->quan = 0; + cpu = find_target_cpu(task, true); + put_task(cpu_rq(cpu), task); + atomic_inc(&cpu_rq(cpu)->num_tasks); + return 0; +} + +void fifo_sched_yield() { put_task(this_rq(), 
task_self()); } + +void fifo_sched_wakeup(struct task *task) +{ + int cpu = find_target_cpu(task, false); + struct fifo_rq *rq = cpu_rq(cpu); + put_task(rq, task); + atomic_inc(&rq->num_tasks); +} + +int fifo_sched_init_percpu(void *percpu_data) +{ + struct fifo_rq *rq = (struct fifo_rq *)percpu_data; + + percpu_get(rqs) = rq; + + if (current_app_id() == 0) { + rq->k = thisk(); + rq->head = rq->tail = 0; + rq->num_tasks = 0; + } + + return 0; +} + +bool fifo_sched_preempt() +{ + struct fifo_task *task = fifo_task_of(task_self()); + if (++task->quan >= PREEMPT_QUAN) { + task->quan = 0; + return true; + } + return false; +} diff --git a/libos/sched/policy/sq.c b/libos/sched/policy/sq.c new file mode 100644 index 0000000..e21f986 --- /dev/null +++ b/libos/sched/policy/sq.c @@ -0,0 +1,165 @@ +/* + * sq.c: single queue c-FCFS scheduler + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +static struct sq_dispatcher *global_dispatcher; + +DEFINE_PERCPU(struct sq_worker *, workers); + +static volatile bool worker_ready[USED_CPUS]; +static volatile bool worker_preempted[USED_CPUS]; +static bool dispatcher_ready = false; + +int sq_sched_spawn(struct task *task, int cpu) +{ + if (current_cpu_id() != 0) { + log_err("%s: must be called on the dispatcher (CPU 0)", __func__); + return -1; + } + + if (!dispatcher_ready) { + dispatcher_ready = true; + this_worker()->cur_task = task; + atomic_store_rel(&this_worker()->state, WORKER_QUEUING); + return 0; + } else { + task->skip_free = true; + task->allow_preempt = true; + return queue_push(&global_dispatcher->pending_tasks, task); + } +} + +void sq_sched_poll() +{ + struct sq_worker *worker; + enum sq_worker_state worker_state; + struct task *task; + int i; + + if (current_cpu_id() == 0) { + for (i = 1; i < global_dispatcher->num_workers + 1; i++) { + worker = cpu_worker(i); + worker_state = atomic_load_acq(&worker->state); + // log_debug("Poll %p %d %d", worker, i, worker->uintr_index); + + if (worker_state == WORKER_RUNNING) { + if (global_dispatcher->preemption_quantum && !worker_preempted[i] && + now_ns() > worker->start_time + global_dispatcher->preemption_quantum) { + log_debug("! %d %p start %.3lf now %.3lf", i, worker->cur_task, + (double)worker->start_time / NSEC_PER_USEC, + (double)now_ns() / NSEC_PER_USEC); + _senduipi(worker->uintr_index); + /* Avoid preempting more times. */ + worker_preempted[i] = true; + } + continue; + } + + /* Previous task may have not been dequeued. */ + if (worker_state == WORKER_QUEUING) + continue; + + if (worker_state == WORKER_FINISHED) { + log_debug("worker %d %p finished", i, worker->cur_task); + /* All tasks are created and freed by dispatcher. */ + task_free(worker->cur_task); + } else if (worker_state == WORKER_PREEMPTED) { + log_debug("worker %d %p preempted", i, worker->cur_task); + /* A preempted worker must have a task. 
*/ + assert(worker->cur_task != NULL); + queue_push(&global_dispatcher->pending_tasks, worker->cur_task); + } + + task = queue_pop(&global_dispatcher->pending_tasks); + if (task) { + log_debug("worker %d %p started", i, task); + worker->cur_task = task; + atomic_store_rel(&worker->state, WORKER_QUEUING); + worker_preempted[i] = false; + } else { + atomic_store_rel(&worker->state, WORKER_IDLE); + } + } + } +} + +int sq_sched_set_params(void *params) +{ + struct sq_params *p = params; + log_info("sq_sched_set_params: num_workers=%d preemption_quantum=%d", p->num_workers, + p->preemption_quantum); + + if (p->num_workers >= 0 && p->num_workers < USED_CPUS) { + global_dispatcher->num_workers = p->num_workers; + global_dispatcher->preemption_quantum = p->preemption_quantum * NSEC_PER_USEC; + return 0; + } + + return -EINVAL; +} + +int sq_sched_init(void *data) +{ + struct sq_dispatcher *dispatcher = data; + dispatcher->num_workers = USED_CPUS; + queue_init(&dispatcher->pending_tasks); + global_dispatcher = dispatcher; + memset((void *)worker_ready, 0, sizeof(bool) * USED_CPUS); + memset((void *)worker_preempted, 0, sizeof(bool) * USED_CPUS); + return 0; +} + +int sq_sched_init_percpu(void *percpu_data) +{ + int ret, i; + struct sq_worker *worker = percpu_data; + + worker->cur_task = NULL; + worker->state = WORKER_IDLE; + percpu_get(workers) = worker; + + if (current_cpu_id() == 0) { + for (i = 1; i < USED_CPUS; i++) { + while (!atomic_load_acq(&worker_ready[i])); + + ret = uintr_register_sender(cpu_worker(i)->uintr_fd, 0); + if (ret < 0) { + log_err("failed to register interrupt sender\n"); + return -1; + } + cpu_worker(i)->uintr_index = ret; + log_debug("worker %p %d %d", cpu_worker(i), i, ret); + } + log_info("SQ dispatcher registered as a sender for all workers."); + } else { + extern void uintr_handler(); + ret = uintr_register_handler(uintr_handler, 0); + if (ret < 0) { + log_err("failed to register interrupt handler\n"); + return -1; + } + + worker->uintr_fd = uintr_vector_fd(SQ_UVEC, 0); + if (worker->uintr_fd < 0) { + log_err("failed to register interrupt vector\n"); + return -1; + } + + atomic_store_rel(&worker_ready[current_cpu_id()], true); + local_irq_disable(); + } + + return 0; +} diff --git a/libos/sched/policy/sq_lcbe.c b/libos/sched/policy/sq_lcbe.c new file mode 100644 index 0000000..4d71469 --- /dev/null +++ b/libos/sched/policy/sq_lcbe.c @@ -0,0 +1,455 @@ +/* + * sq.c: single queue c-FCFS scheduler + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct sq_dispatcher *global_dispatcher; +DEFINE_PERCPU(struct sq_cpu *, sq_cpus); +static volatile bool lc_worker_preempted[USED_CPUS]; +static volatile bool be_worker_preempted[USED_CPUS]; + +static bool dispatcher_ready = false; + +static inline struct sq_task *sq_task_of(struct task *task) +{ + return (struct sq_task *)&task->policy_task_data; +} + +static inline __nsec task_latency(struct task *task) +{ + return now_ns() - sq_task_of(task)->ingress; +} + +static inline double task_slo(struct task *task) +{ + return (double)sq_task_of(task)->active / task_latency(task); +} + +extern __thread struct task *__curr; +struct task *sq_sched_pick_next() +{ + /* We use this flag to keep the state consistent */ + struct sq_cpu *cpu = this_sq_cpu(); + if (atomic_load_acq(&cpu->need_sched)) { + log_debug("need sched! 
%p", cpu); + cpu->is_lc = !cpu->is_lc; + atomic_store_rel(&cpu->need_sched, false); + } + + struct sq_worker *worker = this_worker(); + if (current_cpu_id() == 0 || !cpu->is_lc) { + return worker->cur_task; + } else { + if (atomic_load_acq(&worker->state) == WORKER_QUEUING) { + /* enter twice from BE to LC */ + if (current_app_id() == SQ_LC) { + worker->start_time = now_ns(); + sq_task_of(worker->cur_task)->start = now_ns(); + atomic_store_rel(&worker->state, WORKER_RUNNING); + } + return worker->cur_task; + } else + return NULL; + } +} + +void sq_sched_yield() +{ + struct sq_cpu *cpu; + + /* Switch APP on CPU 0 */ + if (current_cpu_id() == 0) { + cpu = this_sq_cpu(); + if (current_app_id() == SQ_BE) { + cpu->is_lc = true; + global_dispatcher->be_ready[0] = true; + } else { + cpu->is_lc = false; + } + cpu->lc.state = WORKER_QUEUING; + } +} + +void sq_sched_finish_task(struct task *task) +{ + struct sq_worker *worker = this_worker(); + + /* + * LC task: finished by worker and released by dispatcher + * BE task: running in background; only task on CPU 0 finishes + */ + atomic_store_rel(&worker->state, WORKER_FINISHED); + + /* Collect BE workers */ + if (current_cpu_id() == 0) { + worker->cur_task = NULL; + this_sq_cpu()->is_lc = true; + this_sq_cpu()->lc.state = WORKER_QUEUING; + } else if (current_app_id() == SQ_BE) { + log_debug("BE task %d finished %lu %lu", worker->cur_task->id, + now_ns() - sq_task_of(worker->cur_task)->start, + sq_task_of(worker->cur_task)->active); + worker->cur_task = NULL; + } +} + +int sq_sched_spawn(struct task *task, int cpu_id) +{ + /* + * LC task: worker task will pushed back to global queue + * BE task: do nothing; each worker only has one background task + */ + if (current_app_id() == SQ_BE && current_cpu_id() != 0) + return 0; + + if (current_cpu_id() != 0) { + log_err("%s: must be called on the dispatcher (CPU 0)", __func__); + return -1; + } + + if (!dispatcher_ready) { + struct sq_cpu *cpu = sq_cpu(cpu_id); + if (cpu_id == 0) { + if (!cpu->be.cur_task) { + log_debug("BE comes"); + cpu->be.cur_task = task; + } else if (!cpu->lc.cur_task) { + log_debug("LC comes"); + cpu->lc.cur_task = task; + cpu->lc.state = WORKER_QUEUING; + dispatcher_ready = true; + } else { + log_err("%s: Too many tasks", __func__); + return -EINVAL; + } + } else { + task->allow_preempt = true; + cpu->be.cur_task = task; + } + return 0; + } else { + task->skip_free = true; + task->allow_preempt = true; + sq_task_of(task)->ingress = now_ns(); + sq_task_of(task)->active = 0; + return queue_push(&global_dispatcher->pending_tasks, task); + } +} + +static bool is_congested(void) +{ + if (queue_is_empty(&global_dispatcher->pending_tasks)) + return false; + + struct task *task = queue_head(&global_dispatcher->pending_tasks); + + if (sq_task_of(task)->active && task_slo(task) < global_dispatcher->congestion_thresh) { + log_debug("Congested %lu %lu", sq_task_of(task)->active, task_latency(task)); + return true; + } + + return false; +} + +static int pick_cpu(void) +{ + /* + 1. try to allocate a hyperthread pair core + 2. try the core that we most recently ran on + 3. the core is busy and no core available, should we preempt it? + 4. pick the lowest available core + 5. 
no cores available, take from the first bursting proc + */ + return bitmap_find_next_cleared(global_dispatcher->lc_cpus, USED_CPUS, 0); +} + +/* core allocation */ +static void adjust_cpus(void) +{ + int cpu; + + if (now_ns() <= global_dispatcher->last_adjust + global_dispatcher->adjust_quantum) + return; + + if (!is_congested()) + return; + + cpu = pick_cpu(); + if (cpu == USED_CPUS) + return; + + /* LC will never give the CPU back to BE. */ + if (!atomic_load_acq(&be_worker_preempted[cpu])) { + log_debug("LC asks for %d", cpu); + _senduipi(cpu_worker(cpu)->uintr_index); + be_worker_preempted[cpu] = true; + } + + global_dispatcher->last_adjust = now_ns(); +} + +bool sq_sched_preempt() +{ + struct sq_cpu *cpu = this_sq_cpu(); + struct sq_worker *worker = this_worker(); + if (atomic_load_acq(&worker->state) == WORKER_RUNNING) { + sq_task_of(worker->cur_task)->active += now_ns() - sq_task_of(worker->cur_task)->start; + if (!cpu->is_lc) { + cpu->need_sched = true; + bitmap_set(global_dispatcher->lc_cpus, current_cpu_id()); + global_dispatcher->lc_nr_cpus++; + } else { + atomic_store_rel(&worker->state, WORKER_PREEMPTED); + } + + return true; + } else { + return false; + } +} + +void sq_sched_poll() +{ + struct sq_cpu *cpu; + struct sq_worker *worker; + enum sq_worker_state worker_state; + struct task *task; + int i; + + if (current_cpu_id() == 0) { + while (!atomic_load_acq(&global_dispatcher->be_ready[0])) task_yield(); + + adjust_cpus(); + + for (i = 1; i < USED_CPUS; i++) { + cpu = sq_cpu(i); + + if (bitmap_test(global_dispatcher->lc_cpus, i)) { + worker = &cpu->lc; + worker_state = atomic_load_acq(&worker->state); + + if (worker_state == WORKER_RUNNING) { + if (global_dispatcher->preemption_quantum && !lc_worker_preempted[i] && + now_ns() > worker->start_time + global_dispatcher->preemption_quantum) { + log_debug("! %d %d start %.3lf now %.3lf", i, worker->cur_task->id, + (double)worker->start_time / NSEC_PER_USEC, + (double)now_ns() / NSEC_PER_USEC); + _senduipi(worker->uintr_index); + /* Avoid preempting more times. */ + lc_worker_preempted[i] = true; + } + continue; + } + + /* Previous task may have not been dequeued. */ + if (worker_state == WORKER_QUEUING) + continue; + + if (worker_state == WORKER_FINISHED) { + log_debug("%d worker %d finished", i, worker->cur_task->id); + /* All tasks are created and freed by dispatcher. */ + task_free(worker->cur_task); + } else if (worker_state == WORKER_PREEMPTED) { + log_debug("%d worker %d preempted", i, worker->cur_task->id); + /* A preempted worker must have a task. 
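+ * Re-queue it on the global pending queue so it can be resumed on a later dispatch round.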
*/ + queue_push(&global_dispatcher->pending_tasks, worker->cur_task); + } + + task = queue_pop(&global_dispatcher->pending_tasks); + if (task) { + log_debug("%d worker %d started", i, task->id); + worker->cur_task = task; + lc_worker_preempted[i] = false; + atomic_store_rel(&worker->state, WORKER_QUEUING); + } else { + atomic_store_rel(&worker->state, WORKER_IDLE); + } + } else { /* Run BE workers */ + worker = &cpu->be; + worker_state = atomic_load_acq(&worker->state); + + if (worker_state == WORKER_IDLE) + atomic_store_rel(&worker->state, WORKER_RUNNING); + + if (cpu->is_lc && !atomic_load_acq(&cpu->need_sched)) { + log_debug("Run BE %d %d", i, cpu->is_lc); + atomic_store_rel(&cpu->need_sched, true); + } + } + } + } +} + +int sq_sched_set_params(void *params) +{ + struct sq_params *p = params; + log_info("sq_sched_set_params: num_workers=%d preemption_quantum=%d guaranteed_cpus=%d " + "congestion_thresh=%.3lf", + p->num_workers, p->preemption_quantum, p->guaranteed_cpus, p->congestion_thresh); + + if (p->num_workers >= 0 && p->num_workers < USED_CPUS) { + global_dispatcher->num_workers = p->num_workers; + global_dispatcher->preemption_quantum = p->preemption_quantum * NSEC_PER_USEC; + global_dispatcher->lc_guaranteed_cpus = p->guaranteed_cpus; + for (unsigned int i = 0; i < global_dispatcher->lc_guaranteed_cpus + 1; i++) + bitmap_set(global_dispatcher->lc_cpus, i); + global_dispatcher->lc_nr_cpus = global_dispatcher->lc_guaranteed_cpus; + global_dispatcher->adjust_quantum = p->adjust_quantum * NSEC_PER_USEC; + global_dispatcher->congestion_thresh = p->congestion_thresh; + return 0; + } + + return -EINVAL; +} + +int sq_sched_init(void *data) +{ + struct sq_dispatcher *dispatcher = data; + global_dispatcher = dispatcher; + if (current_app_id() == SQ_LC) { + dispatcher->num_workers = USED_CPUS; + queue_init(&dispatcher->pending_tasks); + memset((void *)lc_worker_preempted, 0, sizeof(bool) * USED_CPUS); + memset((void *)be_worker_preempted, 0, sizeof(bool) * USED_CPUS); + memset((void *)dispatcher->lc_ready, 0, sizeof(int) * USED_CPUS); + memset((void *)dispatcher->be_ready, 0, sizeof(int) * USED_CPUS); + bitmap_init(dispatcher->lc_cpus, USED_CPUS, false); + dispatcher->last_adjust = now_ns(); + } else { + dispatcher->be_pid = getpid(); + log_debug("BE pid %d", dispatcher->be_pid); + } + return 0; +} + +extern void uintr_handler(); +static inline int uipi_receiver(struct sq_worker *worker) +{ + int ret = 0; + + if ((ret = uintr_register_handler(uintr_handler, 0)) < 0) { + log_err("failed to register interrupt handler\n"); + goto out; + } + + if ((ret = uintr_vector_fd(SQ_UVEC, 0)) < 0) { + log_err("failed to register interrupt vector\n"); + goto out; + } + worker->uintr_fd = ret; + +out: + return ret; +} + +static inline int uipi_sender(struct sq_worker *worker) +{ + int ret; + + if ((ret = uintr_register_sender(worker->uintr_fd, 0)) < 0) { + log_err("failed to register interrupt sender\n"); + return ret; + } + worker->uintr_index = ret; + + return 0; +} + +int sq_sched_init_percpu(void *percpu_data) +{ + int ret = 0, i, pidfd; + struct sq_cpu *cpu = percpu_data; + + if (current_app_id() == SQ_LC) { + init_worker(&cpu->lc); + init_worker(&cpu->be); + /* First APP is LC. */ + cpu->is_lc = true; + } + percpu_get(sq_cpus) = cpu; + +#ifdef SKYLOFT_UINTR + if (current_cpu_id() == 0) { + if (current_app_id() == SQ_LC) { + /* LC dispatcher can access fd of LC worker in the same process. 
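+ * Wait for each LC worker to publish its uintr_fd (lc_ready), then register the dispatcher as a sender for its user interrupt.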
*/ + for (i = 1; i < USED_CPUS; i++) { + while (!atomic_load_acq(&global_dispatcher->lc_ready[i])); + if ((ret = uipi_sender(&sq_cpu(i)->lc)) < 0) + goto out; + } + log_info("SQ dispatcher registered as a sender for all LC workers."); + + /* LC dispatcher cannot directly access fd of BE worker. The fd is shared by BE process. + */ + for (i = 1; i < USED_CPUS; i++) { + while (!atomic_load_acq(&global_dispatcher->be_ready[i])); + if (i == 1) { + if ((ret = pidfd_open(global_dispatcher->be_pid)) < 0) { + log_err("Failed to open a process fd: %s", strerror(errno)); + goto out; + } + pidfd = ret; + } + + if ((ret = pidfd_getfd(pidfd, sq_cpu(i)->be.uintr_fd)) < 0) { + log_err("Failed to steal an fd from another process: %s", strerror(errno)); + goto out; + } + log_debug("old fd %d new fd %d", sq_cpu(i)->be.uintr_fd, ret); + sq_cpu(i)->be.uintr_fd = ret; + if ((ret = uipi_sender(&sq_cpu(i)->be)) < 0) + goto out; + } + log_info("SQ dispatcher registered as a sender for all BE workers."); + + while (!atomic_load_acq(&sq_cpu(0)->be.cur_task)); + } + } else { + if (current_app_id() == SQ_LC) { + ret = uipi_receiver(&cpu->lc); + atomic_store_rel(&global_dispatcher->lc_ready[current_cpu_id()], 1); + } else { + ret = uipi_receiver(&cpu->be); + atomic_store_rel(&global_dispatcher->be_ready[current_cpu_id()], 1); + } + local_irq_disable(); + } +#endif + +out: + return ret; +} + +void sq_sched_dump_tasks() +{ + int i; + struct sq_cpu *cpu; + + printf("Core Allocation Status:\n"); + printf("\t0 Dispatcher\n"); + for (i = 1; i < USED_CPUS; i++) { + cpu = sq_cpu(i); + + if (cpu->is_lc) { + printf("\t%d LC\n", i); + } else { + printf("\t%d BE\n", i); + } + } +} \ No newline at end of file diff --git a/libos/sched/sched.c b/libos/sched/sched.c new file mode 100644 index 0000000..5e0d3e1 --- /dev/null +++ b/libos/sched/sched.c @@ -0,0 +1,438 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define SHM_SCHED_DATA_HUGE_PATH "/mnt/huge/skyloft_sched_data_huge" +#define SHM_SCHED_DATA_HUGE_BASE_ADDR 0x300000000000UL + +static void *huge_pages_base; +static void *shm_sched_data; +static void *shm_sched_data_percpu[USED_CPUS]; + +__thread struct task *__curr, *__idle; +__thread struct kthread *localk; +__thread volatile unsigned int preempt_cnt; +static __thread uint32_t rcu_gen; +extern __thread int g_logic_cpu_id; + +static inline void switch_to_app(void *arg) +{ + struct task *next = arg; + int target_app = next->app_id; + int target_tid = shm_apps[target_app].all_ks[current_cpu_id()].tid; + int ret = 0; + +retry: + if (unlikely(atomic_load_acq(&shm_apps[target_app].exited))) { + log_warn("App %d has exited", target_app); + atomic_store(&shm_metadata->apps[current_cpu_id()], proc->id); + return; + } + + /* trace the application running on this CPU */ + atomic_store(&shm_metadata->apps[current_cpu_id()], target_app); + + /* switch userspace through kernel */ + ret = skyloft_switch_to(target_tid); + + if (unlikely(ret)) { + log_warn("Failed to switch to app %d: %d", target_tid, ret); + goto retry; + } +} + +/** + fast_schedule - (fastpath) switch directly to the next task + */ +static __always_inline void fast_schedule() +{ + struct task *prev = __curr, *next; + + assert_local_irq_disabled(); + assert(__curr != NULL); + + __sched_percpu_lock(g_logic_cpu_id); + next = __sched_pick_next(); + /* slow path: switch to idle and run schedule() */ + if (unlikely(!next)) { + log_debug("%s: (%d,%d) 
-> ", __func__, prev->app_id, prev->id); + __context_switch_to_idle(&prev->rsp, __idle->rsp); + return; + } + __sched_percpu_unlock(g_logic_cpu_id); + + log_debug("%s: (%d,%d) -> (%d,%d)", __func__, prev->app_id, prev->id, next->app_id, next->id); + + /* increment the RCU generation number (odd is in task) */ + atomic_store_rel(&rcu_gen, rcu_gen + 2); + assert((rcu_gen & 0x1) == 0x1); + +#if defined(SKYLOFT_TIMER) && !defined(SKYLOFT_UINTR) && !defined(SKYLOFT_DPDK) + softirq_run(SOFTIRQ_MAX_BUDGET); +#endif + + assert(task_is_runnable(next)); + /* check if we're switching into the same task as before */ + if (unlikely(next == prev)) { + next->stack_busy = false; + return; + } + + /* task must be scheduled atomically */ + if (unlikely(atomic_load_acq(&next->stack_busy))) { + /* wait until the scheduler finishes switching stacks */ + while (atomic_load_acq(&next->stack_busy)) cpu_relax(); + } + + if (unlikely(next->app_id != prev->app_id)) { + /* switch to idle first */ + atomic_store_rel(&prev->stack_busy, false); + switch_to_app(next); + return; + } + + /* switch stacks and enter the next task */ + __curr = next; + __context_switch(&prev->rsp, next->rsp, &prev->stack_busy); +} + +/** + * schedule - (slowpath) idle task + */ +__noreturn __noinline void schedule() +{ + struct task *next; + uint64_t elapsed; + + assert_local_irq_disabled(); + + /* unmark busy for the stack of the previous task */ + if (__curr != NULL) { + atomic_store_rel(&__curr->stack_busy, false); + __curr = NULL; + } + + /* increment the RCU generation number (even is in scheduler) */ + atomic_store_rel(&rcu_gen, rcu_gen + 1); + assert((rcu_gen & 0x1) == 0x0); + + STAT_CYCLES_BEGIN(elapsed); +again: + next = __sched_pick_next(); +#ifdef SKYLOFT_SCHED_CFS + __sched_percpu_unlock(g_logic_cpu_id); +#endif + if (unlikely(!next)) { +#ifdef SCHED_PERCPU +#ifdef SKYLOFT_DPDK + if ((next = softirq_task(localk, SOFTIRQ_MAX_BUDGET))) { + ADD_STAT(LOCAL_SPAWNS, 1); + goto done; + } +#else + /* check for softirqs */ + softirq_run(SOFTIRQ_MAX_BUDGET); +#endif + /* optional load balance */ + __sched_balance(); +#ifdef SKYLOFT_SCHED_CFS + __sched_percpu_lock(g_logic_cpu_id); +#endif +#endif + goto again; + } +done: + /* release the lock */ + __sched_percpu_unlock(g_logic_cpu_id); + + log_debug("%s: -> (%d,%d)", __func__, next->app_id, next->id); + + /* udpate stat counters */ + ADD_STAT_CYCLES(IDLE_CYCLES, elapsed); + ADD_STAT(IDLE, 1); + + /* increment the RCU generation number (odd is in task) */ + atomic_store_rel(&rcu_gen, rcu_gen + 1); + assert((rcu_gen & 0x1) == 0x1); + + /* task must be scheduled atomically */ + if (unlikely(atomic_load_acq(&next->stack_busy))) { + /* wait until the scheduler finishes switching stacks */ + while (atomic_load_acq(&next->stack_busy)) cpu_relax(); + } + + ADD_STAT(SWITCH_TO, 1); + if (unlikely(next->app_id != current_app_id())) { + switch_to_app(next); + + /* this function should not return, so we call `schedule` again */ + schedule(); + } + + /* switch stacks and enter the next task */ + __curr = next; + __context_switch_from_idle(next->rsp); +} + +__noreturn void start_schedule(void) +{ + atomic_store_rel(&rcu_gen, 1); + __sched_percpu_lock(g_logic_cpu_id); + schedule(); +} + +int sched_init_percpu() +{ + if (sched_task_init_percpu() < 0) { + log_err("sched_task_init_percpu failed"); + return -1; + } + + void *data_percpu = shm_sched_data_percpu[g_logic_cpu_id]; + log_debug("sched: shm_sched_data_percpu[%d]: %p", current_cpu_id(), data_percpu); + + if (__sched_init_percpu(data_percpu) < 0) { + 
log_err("sched: init policy percpu failed"); + return -1; + } + + struct task *task = task_create_idle(); + if (!task) { + log_err("sched: create idle task failed"); + return -1; + } + + __curr = __idle = task; + + extern uint32_t *rcu_gen_percpu[USED_CPUS]; + rcu_gen_percpu[g_logic_cpu_id] = &rcu_gen; + + return 0; +} + +static int sched_shm_map() +{ + int i; + size_t task_size, policy_size, policy_percpu_size; + off_t policy_off, policy_percpu_off; + size_t huge_pages_size = 0; + + task_size = TASK_SIZE_PER_APP * MAX_APPS; + policy_size = align_up(SCHED_DATA_SIZE, PGSIZE_2MB); + policy_percpu_size = align_up(SCHED_PERCPU_DATA_SIZE, PGSIZE_2MB); + + huge_pages_size = task_size; + policy_off = huge_pages_size; + huge_pages_size += policy_size; + policy_percpu_off = huge_pages_size; + huge_pages_size += policy_percpu_size * USED_CPUS; + + huge_pages_base = + mem_map_shm_file(SHM_SCHED_DATA_HUGE_PATH, (void *)SHM_SCHED_DATA_HUGE_BASE_ADDR, + huge_pages_size, PGSIZE_2MB, 0); + if (huge_pages_base != (void *)SHM_SCHED_DATA_HUGE_BASE_ADDR) { + log_err("sched: open shm %s failed", SHM_SCHED_DATA_HUGE_PATH); + return -ENOMEM; + } + + shm_sched_data = (void *)(huge_pages_base + policy_off); + for (i = 0; i < USED_CPUS; i++) + shm_sched_data_percpu[i] = + (void *)(huge_pages_base + policy_percpu_off + i * policy_percpu_size); + + return 0; +} + +int sched_init() +{ + int ret; + + log_info("sched: scheduling policy %s", __sched_name); + + if ((ret = sched_shm_map()) < 0) { + log_err("sched: map huge pages failed %d", ret); + return ret; + } + + if ((ret = sched_task_init(huge_pages_base + current_app_id() * TASK_SIZE_PER_APP)) < 0) { + log_err("sched: init task failed %d", ret); + return ret; + } + + if ((ret = __sched_init(shm_sched_data)) < 0) { + log_err("sched: init policy failed"); + return ret; + } + + return 0; +} + +/* task APIs */ + +int task_spawn(int cpu_id, thread_fn_t fn, void *arg, int stack_size) +{ + struct task *task; + int ret; + int flags; + + ADD_STAT(LOCAL_SPAWNS, 1); + + task = task_create(fn, arg); + if (unlikely(!task)) + return -ENOMEM; + + local_irq_save(flags); + ret = __sched_spawn(task, cpu_id); + if (unlikely(ret)) { + log_warn("sched: %s failed to spawn task on %d", __func__, cpu_id); + task_free(task); + } + local_irq_restore(flags); + return ret; +} + +int task_enqueue(int cpu_id, struct task *task) +{ + int flags, ret; + ADD_STAT(LOCAL_SPAWNS, 1); + local_irq_save(flags); + ret = __sched_spawn(task, cpu_id); + local_irq_restore(flags); + return ret; +} + +/** + * task_yield - yield the current running task + */ +void task_yield() +{ + int flags; +#ifdef SCHED_PERCPU + softirq_run(SOFTIRQ_MAX_BUDGET); +#endif + local_irq_save(flags); + atomic_store_rel(&__curr->stack_busy, true); + __sched_yield(); + fast_schedule(); + local_irq_restore(flags); +} + +/** + * task_wakeup - wake up a BLOCKED task + */ +void task_wakeup(struct task *task) +{ + int flags; + assert(task_is_blocked(task)); + local_irq_save(flags); + task->state = TASK_RUNNABLE; + __sched_wakeup(task); + local_irq_restore(flags); +} + +/** + * task_block - marks a task as BLOCKED + * @lock: the lock to be released + */ +void task_block(spinlock_t *lock) +{ + int flags; + assert_preempt_disabled(); + assert_spin_lock_held(lock); + local_irq_disable(); + local_irq_save(flags); + preempt_enable(); + + __curr->state = TASK_BLOCKED; + __curr->stack_busy = true; + spin_unlock(lock); + __sched_block(); + fast_schedule(); + local_irq_restore(flags); +} + +static void __task_exit() +{ + /* task stack might be freed 
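+ * once task_free() runs below; task_exit() therefore switches to the idle stack (via __context_switch_to_fn_nosave) before entering this function.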
*/ + __sched_finish_task(__curr); + if (!__curr->skip_free) + task_free(__curr); + __curr = NULL; + + __sched_percpu_lock(g_logic_cpu_id); + schedule(); +} + +/** + * task_exit - release the current running task and run schedule() + * @code: exit code or return code of the function + */ +__noreturn void task_exit(void *code) +{ + /* disable preemption before scheduling */ + local_irq_disable(); + __context_switch_to_fn_nosave(__task_exit, __idle->rsp); +} + +/* API implementations */ + +const char *__api sl_sched_policy_name() { return __sched_name; } + +int __api sl_current_task_id() { return -1; } + +int __api sl_sched_set_params(void *params) { return __sched_set_params(params); } + +void __api sl_sched_poll() { __sched_poll(); } + +int __api sl_task_spawn(thread_fn_t fn, void *arg, int stack_size) +{ + return task_spawn(g_logic_cpu_id, fn, arg, stack_size); +} + +int __api sl_task_spawn_oncpu(int cpu_id, thread_fn_t fn, void *arg, int stack_size) +{ + return task_spawn(cpu_id, fn, arg, stack_size); +} + +void __api sl_task_yield() { task_yield(); } + +__noreturn void __api sl_task_exit(void *code) { task_exit(code); } + +void __api sl_dump_tasks() { __sched_dump_tasks(); } + +#ifdef SKYLOFT_UINTR + +void __attribute__((target("general-regs-only"))) __attribute__((interrupt)) +uintr_handler(struct __uintr_frame *ui_frame, unsigned long long vector) +{ + /* reset UPID.PIR */ +#ifdef SCHED_PERCPU + _senduipi(uintr_index()); +#endif + ADD_STAT(UINTR, 1); + // softirq_run(SOFTIRQ_MAX_BUDGET); + /* check if rescheduling needed */ + if (__sched_preempt()) { + if (preempt_enabled() && __curr->allow_preempt) { + task_yield(); + } + } +} + +#endif diff --git a/libos/sched/softirq.c b/libos/sched/softirq.c new file mode 100644 index 0000000..bd05193 --- /dev/null +++ b/libos/sched/softirq.c @@ -0,0 +1,151 @@ +/* + * softirq.c - handles high priority events (timers, ingress packets, etc.) 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +struct softirq_work { + /* packets received */ + int recv_cnt; + /* packets completed (sent by I/O thread) */ + int compl_cnt; + /* budget left for timer */ + int timer_budget; + struct kthread *k; + struct rx_net_hdr *recv_reqs[SOFTIRQ_MAX_BUDGET]; + struct mbuf *compl_reqs[SOFTIRQ_MAX_BUDGET]; +}; + +static void softirq_gather_work(struct softirq_work *w, struct kthread *k, int budget) +{ + int recv_cnt = 0; + int compl_cnt = 0; + + budget = MIN(budget, SOFTIRQ_MAX_BUDGET); +#ifdef SKYLOFT_DPDK + while (budget-- > 0) { + uint64_t cmd; + uint64_t payload; + + if (!lrpc_recv(&k->rxq, &cmd, &payload)) + break; + + switch (cmd) { + case RX_NET_RECV: + w->recv_reqs[recv_cnt] = (struct rx_net_hdr *)payload; + recv_cnt++; + break; + case RX_NET_COMPLETE: + w->compl_reqs[compl_cnt++] = (struct mbuf *)payload; + break; + default: + log_err("net: invalid RXQ cmd '%ld'", cmd); + } + } +#endif + + w->k = k; + w->recv_cnt = recv_cnt; + w->compl_cnt = compl_cnt; + w->timer_budget = budget; +} + +static inline bool softirq_pending(struct kthread *k) +{ +#ifdef SKYLOFT_DPDK + return !lrpc_empty(&k->rxq) || timer_needed(k); +#else + return timer_needed(k); +#endif +} + +static void softirq_fn(void *arg) +{ + struct softirq_work *w = arg; + uint64_t elapsed; + STAT_CYCLES_BEGIN(elapsed); + +#ifdef SKYLOFT_DPDK + int i; + /* complete TX requests and free packets */ + for (i = 0; i < w->compl_cnt; i++) mbuf_free(w->compl_reqs[i]); + + /* deliver new RX packets to the runtime */ + net_rx_softirq(w->recv_reqs, w->recv_cnt); +#endif + + /* handle any pending timeouts */ + if (timer_needed(w->k)) + timer_softirq(w->k, w->timer_budget); + + ADD_STAT_CYCLES(SOFTIRQ_CYCLES, elapsed); +} + +/** + * softirq_task - creates a closure for softirq handling + * @k: the kthread from which to take RX queue commands + * @budget: the maximum number of events to process + * + * Returns a task that handles receive processing when executed or + * NULL if no receive processing work is available. 
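+ * The softirq_work descriptor lives in a buffer carved out of the new task's stack by task_create_with_buf().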
+ */ +struct task *softirq_task(struct kthread *k, int budget) +{ + struct task *t; + struct softirq_work *w; + + /* check if there's any work available */ + if (!softirq_pending(k)) + return NULL; + + t = task_create_with_buf(softirq_fn, (void **)&w, sizeof(struct softirq_work)); + if (unlikely(!t)) + return NULL; + + softirq_gather_work(w, k, budget); + + return t; +} + +/** + * softirq_run - handles softirq processing in the current task + * @budget: the maximum number of events to process + */ +bool softirq_run(int budget) +{ + struct kthread *k; + struct softirq_work w; + + k = getk(); + /* check if there's any work available */ + if (!softirq_pending(k)) { + putk(); + return false; + } + +#ifdef SKYLOFT_DPDK + __sched_percpu_lock(k->cpu); + softirq_gather_work(&w, k, budget); + __sched_percpu_unlock(k->cpu); + softirq_fn(&w); +#else + w.k = k; + w.timer_budget = MIN(budget, SOFTIRQ_MAX_BUDGET); + softirq_fn(&w); +#endif + + putk(); + return true; +} diff --git a/libos/sched/switch.S b/libos/sched/switch.S new file mode 100644 index 0000000..520de10 --- /dev/null +++ b/libos/sched/switch.S @@ -0,0 +1,73 @@ +.intel_syntax noprefix +.text + +.align 16 +.globl __context_switch +.type __context_switch, @function +__context_switch: + push rdi + push r15 + push r14 + push r13 + push r12 + push rbp + push rbx + mov [rdi], rsp + + /* clear the stack busy flag */ + mov byte ptr [rdx], 0 + + mov rsp, rsi + pop rbx + pop rbp + pop r12 + pop r13 + pop r14 + pop r15 + pop rdi +#ifdef SKYLOFT_UINTR + /* enable preemption */ + stui +#endif + ret + +.align 16 +.globl __context_switch_from_idle +.type __context_switch_from_idle, @function +__context_switch_from_idle: + mov rsp, rdi + pop rbx + pop rbp + pop r12 + pop r13 + pop r14 + pop r15 + pop rdi +#ifdef SKYLOFT_UINTR + /* enable preemption */ + stui +#endif + ret + +.align 16 +.globl __context_switch_to_idle +.type __context_switch_to_idle, @function +__context_switch_to_idle: + push rdi + push r15 + push r14 + push r13 + push r12 + push rbp + push rbx + mov [rdi], rsp + + mov rsp, rsi + jmp schedule + +.align 16 +.globl __context_switch_to_fn_nosave +.type __context_switch_to_fn_nosave, @function +__context_switch_to_fn_nosave: + mov rsp, rsi + jmp rdi diff --git a/libos/sched/task.c b/libos/sched/task.c new file mode 100644 index 0000000..bc33122 --- /dev/null +++ b/libos/sched/task.c @@ -0,0 +1,244 @@ +#include "skyloft/sched.h" +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +atomic_int task_count; +static atomic_int task_id_allocator; + +static __always_inline void __task_init(struct task *t, struct stack *s) +{ + extern int g_app_id; + t->app_id = g_app_id; + t->stack = s; + t->stack_busy = false; + t->state = TASK_RUNNABLE; + t->allow_preempt = false; + t->skip_free = false; +#if DEBUG + t->id = atomic_inc(&task_id_allocator); +#endif + __sched_init_task(t); +} + +#ifdef SCHED_PERCPU + +/* per-CPU task allocator */ + +static struct slab thread_slab; +static struct tcache *task_tcache; +static DEFINE_PERCPU(struct tcache_percpu, task_percpu); + +static __always_inline struct task *__task_create(bool _idle) +{ + struct task *t; + struct stack *s; + + preempt_disable(); + t = tcache_alloc(&percpu_get(task_percpu)); + if (unlikely(!t)) { + preempt_enable(); + return NULL; + } + + s = stack_alloc(); + if (unlikely(!s)) { + tcache_free(&percpu_get(task_percpu), t); + preempt_enable(); + return NULL; + } + preempt_enable(); + + __task_init(t, s); + + log_debug("%s %p %p %d", 
__func__, t, t->stack, t->id); + + return t; +} + +static __always_inline void __task_free(struct task *t) +{ + stack_free(t->stack); + tcache_free(&percpu_get(task_percpu), t); +} + +static __always_inline int __task_alloc_init_percpu() +{ + tcache_init_percpu(task_tcache, &percpu_get(task_percpu)); + stack_init_percpu(); + + return 0; +} + +static __always_inline int __task_alloc_init(void *base) +{ + int ret; + + if ((ret = stack_init()) < 0) { + log_err("sched_task_init: failed to init stack"); + return ret; + } + + ret = slab_create(&thread_slab, "runtime_threads", sizeof(struct task), 0); + if (ret) + return ret; + + task_tcache = slab_create_tcache(&thread_slab, TCACHE_DEFAULT_MAG_SIZE); + if (!task_tcache) { + log_err("sched_task_init: failed to create task tcache"); + slab_destroy(&thread_slab); + return -ENOMEM; + } + + return 0; +} + +#else + +/* centralized task allocator */ + +struct task_cache { + atomic_int head, tail; + struct task *task[MAX_TASKS_PER_APP]; + struct stack *stack[MAX_TASKS_PER_APP]; +} __aligned_cacheline; +BUILD_ASSERT(is_power_of_two(MAX_TASKS_PER_APP)); +#define TASK_CACHE_MASK (MAX_TASKS_PER_APP - 1) + +static struct task_cache task_cache; + +static __always_inline struct task *__task_create(bool idle) +{ + struct task *t; + struct stack *s; + int i; + + assert_local_irq_disabled(); + BUG_ON(task_cache.head < task_cache.tail); + + i = atomic_inc(&task_cache.head) & TASK_CACHE_MASK; + t = task_cache.task[i]; + s = task_cache.stack[i]; + + __task_init(t, s); + + log_debug("%s %p %p %d", __func__, t, t->stack, t->id); + + return t; +} + +static __always_inline void __task_free(struct task *t) +{ + int i = atomic_inc(&task_cache.tail) & TASK_CACHE_MASK; + task_cache.stack[i] = t->stack; + task_cache.task[i] = t; +} + +static __always_inline int __task_alloc_init(void *base) +{ + int i; + void *stack_base = base + MAX_TASKS_PER_APP * sizeof(struct task); + + for (i = 0; i < MAX_TASKS_PER_APP; i++) { + task_cache.task[i] = base + i * sizeof(struct task); + task_cache.stack[i] = stack_base + i * sizeof(struct stack); + } + + return 0; +} + +static __always_inline int __task_alloc_init_percpu() { return 0; } + +#endif + +struct task *task_create(thread_fn_t fn, void *arg) +{ + uint64_t *rsp; + struct task *task; + struct switch_frame *frame; + + task = __task_create(false); + if (unlikely(!task)) + return NULL; + + rsp = (uint64_t *)stack_top(task->stack); + *--rsp = (uint64_t)task_exit; + frame = (struct switch_frame *)rsp - 1; + frame->rip = (uint64_t)fn; + frame->rdi = (uint64_t)arg; + frame->rbp = 0; + task->rsp = (uint64_t)frame; + + return task; +} + +struct task *task_create_with_buf(thread_fn_t fn, void **buf, size_t buf_len) +{ + + uint64_t rsp, *ptr; + struct switch_frame *frame; + + struct task *task = __task_create(false); + if (unlikely(!task)) + return NULL; + + rsp = stack_top(task->stack); + rsp -= buf_len; + rsp = align_down(rsp, RSP_ALIGNMENT); + *buf = (void *)rsp; + ptr = (uint64_t *)rsp; + *--ptr = (uint64_t)task_exit; + frame = (struct switch_frame *)ptr - 1; + frame->rip = (uint64_t)fn; + frame->rdi = (uint64_t)*buf; + frame->rbp = 0; + task->rsp = (uint64_t)frame; + + return task; +} + +struct task *task_create_idle() +{ + struct task *task; + + task = __task_create(true); + if (unlikely(!task)) + return NULL; + + task->state = TASK_IDLE; + task->rsp = (uint64_t)stack_top(task->stack) - 8; + atomic_inc(&task_count); + return task; +} + +void task_free(struct task *t) +{ + __task_free(t); + atomic_dec(&task_count); + + log_debug("%s %p %p 
%d", __func__, t, t->stack, t->id); +} + +int sched_task_init_percpu() { return __task_alloc_init_percpu(); } + +int sched_task_init(void *base) +{ + int ret; + + if ((ret = __task_alloc_init(base)) < 0) + return ret; + + atomic_init(&task_id_allocator, 0); + + return 0; +} diff --git a/libos/shim/entry.c b/libos/shim/entry.c new file mode 100644 index 0000000..ce17482 --- /dev/null +++ b/libos/shim/entry.c @@ -0,0 +1,30 @@ + +#include + +#include + +static int main_argc, main_ret; +static char **main_argv; + +int __real_main(int, char **); + +static void __real_entry(void *arg) +{ + main_ret = __real_main(main_argc, main_argv); +} + +int __wrap_main(int argc, char **argv) +{ + int ret; + + main_argv = &argv[0]; + main_argc = argc; + + ret = sl_libos_start(__real_entry, NULL); + if (ret) { + fprintf(stderr, "Failed to start libos\n"); + return ret; + } + + return main_ret; +} diff --git a/libos/shim/hook.h b/libos/shim/hook.h new file mode 100644 index 0000000..1f84e66 --- /dev/null +++ b/libos/shim/hook.h @@ -0,0 +1,64 @@ +/* + * hook.h - support for symbol replacement + */ + +#pragma once + +#include + +#include +#include + +#define HOOK1(__fn, __ret, __arg1) \ + __ret __fn(__arg1 __a1) \ + { \ + static __ret (*real_##__fn)(__arg1); \ + if (unlikely(!real_##__fn)) { \ + real_##__fn = dlsym(RTLD_NEXT, #__fn); \ + } \ + __ret __t = real_##__fn(__a1); \ + return __t; \ + } + +#define HOOK1_NORET(__fn, __arg1) \ + void __fn(__arg1 __a1) \ + { \ + static void (*real_##__fn)(__arg1); \ + if (unlikely(!real_##__fn)) { \ + real_##__fn = dlsym(RTLD_NEXT, #__fn); \ + } \ + real_##__fn(__a1); \ + } + +#define HOOK2(__fn, __ret, __arg1, __arg2) \ + __ret __fn(__arg1 __a1, __arg2 __a2) \ + { \ + static __ret (*real_##__fn)(__arg1, __arg2); \ + if (unlikely(!real_##__fn)) { \ + real_##__fn = dlsym(RTLD_NEXT, #__fn); \ + } \ + __ret __t = real_##__fn(__a1, __a2); \ + return __t; \ + } + +#define HOOK3(__fn, __ret, __arg1, __arg2, __arg3) \ + __ret __fn(__arg1 __a1, __arg2 __a2, __arg3 __a3) \ + { \ + static __ret (*real_##__fn)(__arg1, __arg2, __arg3); \ + if (unlikely(!real_##__fn)) { \ + real_##__fn = dlsym(RTLD_NEXT, #__fn); \ + } \ + __ret __t = real_##__fn(__a1, __a2, __a3); \ + return __t; \ + } + +#define HOOK4(__fn, __ret, __arg1, __arg2, __arg3, __arg4) \ + __ret __fn(__arg1 __a1, __arg2 __a2, __arg3 __a3, __arg4 __a4) \ + { \ + static __ret (*real_##__fn)(__arg1, __arg2, __arg3, __arg4); \ + if (unlikely(!real_##__fn)) { \ + real_##__fn = dlsym(RTLD_NEXT, #__fn); \ + } \ + __ret __t = real_##__fn(__a1, __a2, __a3, __a4); \ + return __t; \ + } diff --git a/libos/shim/pthread.c b/libos/shim/pthread.c new file mode 100644 index 0000000..779e2a5 --- /dev/null +++ b/libos/shim/pthread.c @@ -0,0 +1,165 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct join_handle { + void *(*fn)(void *); + void *args; + void *retval; + spinlock_t lock; + struct task *waiter; + bool detached; +}; + +static void __trampoline(void *arg) +{ + struct join_handle *j = arg; + + j->retval = j->fn(j->args); + spin_lock_np(&j->lock); + if (j->detached) { + spin_unlock_np(&j->lock); + return; + } + if (j->waiter != NULL) { + task_enqueue(0, j->waiter); + } + j->waiter = task_self(); + task_block(&j->lock); +} + +int sl_pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*fn)(void *), void *arg) +{ + struct join_handle **handle = (struct join_handle **)thread; + struct join_handle *j; + struct task *task; + + task = 
task_create_with_buf(__trampoline, (void **)&j, sizeof(struct join_handle)); + if (unlikely(!task)) + return -ENOMEM; + + j->fn = fn; + j->args = arg; + spin_lock_init(&j->lock); + j->waiter = NULL; + j->detached = false; + + if (handle) + *handle = j; + + if (unlikely(task_enqueue(current_cpu_id(), task))) { + task_free(task); + return -EAGAIN; + } + + return 0; +} + +int sl_pthread_join(pthread_t thread, void **retval) +{ + struct join_handle *j = (struct join_handle *)thread; + + spin_lock_np(&j->lock); + if (j->detached) { + spin_unlock_np(&j->lock); + return -EINVAL; + } + if (j->waiter == NULL) { + j->waiter = task_self(); + task_block(&j->lock); + spin_lock_np(&j->lock); + } + if (retval) + *retval = j->retval; + spin_unlock_np(&j->lock); + return 0; +} + +int sl_pthread_detach(pthread_t thread) +{ + struct join_handle *j = (struct join_handle *)thread; + + spin_lock_np(&j->lock); + if (j->detached) { + spin_unlock_np(&j->lock); + return -EINVAL; + } + j->detached = true; + if (j->waiter) + task_wakeup(j->waiter); + spin_unlock_np(&j->lock); + return 0; +} + +int sl_pthread_yield(void) +{ + task_yield(); + return 0; +} + +pthread_t sl_pthread_self() +{ + return align_down(stack_top(task_self()->stack) - sizeof(struct join_handle), RSP_ALIGNMENT); +} + +void __attribute__((noreturn)) sl_pthread_exit(void *retval) { task_exit(retval); } + +int sl_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutexattr) +{ + mutex_init((mutex_t *)mutex); + return 0; +} + +int sl_pthread_mutex_lock(pthread_mutex_t *mutex) +{ + mutex_lock((mutex_t *)mutex); + return 0; +} + +int sl_pthread_mutex_trylock(pthread_mutex_t *mutex) +{ + return mutex_try_lock((mutex_t *)mutex) ? 0 : EBUSY; +} + +int sl_pthread_mutex_unlock(pthread_mutex_t *mutex) +{ + mutex_unlock((mutex_t *)mutex); + return 0; +} + +int sl_pthread_mutex_destroy(pthread_mutex_t *mutex) { return 0; } + +int sl_pthread_cond_init(pthread_cond_t *__restrict cond, + const pthread_condattr_t *__restrict cond_attr) +{ + condvar_init((condvar_t *)cond); + return 0; +} + +int sl_pthread_cond_signal(pthread_cond_t *cond) +{ + condvar_signal((condvar_t *)cond); + return 0; +} + +int sl_pthread_cond_broadcast(pthread_cond_t *cond) +{ + condvar_broadcast((condvar_t *)cond); + return 0; +} + +int sl_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) +{ + condvar_wait((condvar_t *)cond, (mutex_t *)mutex); + return 0; +} + +int sl_pthread_cond_destroy(pthread_cond_t *cond) { return 0; } \ No newline at end of file diff --git a/libos/stat.c b/libos/stat.c new file mode 100644 index 0000000..be33c22 --- /dev/null +++ b/libos/stat.c @@ -0,0 +1,20 @@ +#include + +#include +#include +#include +#include + +void print_stats(void) +{ + int i, j; + + printf("%3c", ' '); + for (i = 0; i < STAT_NR; i++) printf("%16s", stat_str(i)); + printf("\n"); + for (i = 0; i < proc->nr_ks; i++) { + printf("%2d:", i); + for (j = 0; j < STAT_NR; j++) printf("%16ld", proc->all_ks[i].stats[j]); + printf("\n"); + } +} diff --git a/libos/sync/rcu.c b/libos/sync/rcu.c new file mode 100644 index 0000000..4b530c3 --- /dev/null +++ b/libos/sync/rcu.c @@ -0,0 +1,150 @@ +/* + * rcu.c - support for read-copy-update + * + * The main challenge of RCU is determining when it's safe to free objects. The + * strategy here is to maintain a per-kthread counter. Whenever the scheduler is + * entered or exited, the counter is incremented. When the count is even, we + * know that either the scheduler loop is still running or the kthread is + * parked. 
When the count is odd, we know a uthread is currently running. We can + * safely free objects by reading each kthread's counter and then waiting until + * each kthread count is either even & >= the previous value (to detect parking) + * or odd & > the previous value (to detect rescheduling). + * + * FIXME: Freeing objects is expensive with this minimal implementation. This + * should be fine as long as RCU updates are rare. The Linux Kernel uses several + * more optimized strategies that we may want to consider in the future. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* the time RCU waits before checking if it can free objects */ +#define RCU_SLEEP_PERIOD (10 * USEC_PER_MSEC) + +/* Protects @rcu_head. */ +static DEFINE_SPINLOCK(rcu_lock); +/* The head of the list of objects waiting to be freed */ +static struct rcu_head *rcu_head; +static bool rcu_worker_launched; +uint32_t *rcu_gen_percpu[USED_CPUS]; + +#ifdef DEBUG +__thread int rcu_read_count; +#endif /* DEBUG */ + +static void rcu_worker(void *arg) +{ + struct rcu_head *head, *next; + unsigned int last_rcu_gen[USED_CPUS]; + unsigned int gen; + int i; + + log_info("rcu: rcu worker %p", arg); + + while (true) { + /* check if any RCU objects are waiting to be freed */ + spin_lock_np(&rcu_lock); + if (!rcu_head) { + spin_unlock_np(&rcu_lock); + timer_sleep(RCU_SLEEP_PERIOD); + continue; + } + head = rcu_head; + rcu_head = NULL; + spin_unlock_np(&rcu_lock); + + /* read the RCU generation counters */ + for (i = 0; i < USED_CPUS; i++) last_rcu_gen[i] = atomic_load_acq(rcu_gen_percpu[i]); + + while (true) { + /* wait for RCU generation counters to increase */ + timer_sleep(RCU_SLEEP_PERIOD); + + /* read the RCU generation counters again */ + for (i = 0; i < USED_CPUS; i++) { + gen = atomic_load_acq(rcu_gen_percpu[i]); + /* wait for a quiescent period (all passes context switch) */ + if ((gen & 0x1) == 0x1 && gen == last_rcu_gen[i]) { + break; + } + } + + /* did any of the RCU generation checks fail? */ + if (i != USED_CPUS) + continue; + + /* actually free the RCU objects */ + while (head) { + next = head->next; + head->func(head); + head = next; + } + + break; + } + } +} + +/** + * rcu_free - frees an RCU object after the quiescent period + * @head: the RCU head structure embedded within the object + * @func: the release method + */ +void rcu_free(struct rcu_head *head, rcu_callback_t func) +{ + bool launch_worker = false; + + head->func = func; + + spin_lock_np(&rcu_lock); + if (unlikely(!rcu_worker_launched)) + launch_worker = rcu_worker_launched = true; + head->next = rcu_head; + rcu_head = head; + spin_unlock_np(&rcu_lock); + + if (unlikely(launch_worker)) + BUG_ON(sl_task_spawn(rcu_worker, head, 0)); +} + +struct sync_arg { + struct rcu_head rcu; + struct task *task; +}; + +static void synchronize_rcu_finish(struct rcu_head *head) +{ + struct sync_arg *tmp = container_of(head, struct sync_arg, rcu); + task_wakeup(tmp->task); +} + +/** + * synchronize_rcu - blocks until it is safe to free an RCU object + * + * WARNING: Can only be called from thread context. 
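+ * Internally it enqueues an rcu_head whose callback wakes the calling task after the next grace period, then blocks the caller on @rcu_lock.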
+ */ +void synchronize_rcu(void) +{ + bool launch_worker = false; + struct sync_arg tmp; + struct task *task = task_self(); + + tmp.rcu.func = synchronize_rcu_finish; + tmp.task = task; + + spin_lock_np(&rcu_lock); + if (unlikely(!rcu_worker_launched)) + launch_worker = rcu_worker_launched = true; + tmp.rcu.next = rcu_head; + rcu_head = &tmp.rcu; + if (unlikely(launch_worker)) + BUG_ON(sl_task_spawn(rcu_worker, NULL, 0)); + task_block(&rcu_lock); +} diff --git a/libos/sync/signal.c b/libos/sync/signal.c new file mode 100644 index 0000000..c15acb5 --- /dev/null +++ b/libos/sync/signal.c @@ -0,0 +1,25 @@ +#include + +#include +#include +#include + +static void signal_handler(int signum, siginfo_t *info, void *extra) +{ + log_notice("Handle signal: %d tid: %d", signum, _gettid()); + if (signum == SIGINT || signum == SIGTERM) + exit(EXIT_SUCCESS); +} + +int signal_init() +{ + struct sigaction action; + + action.sa_flags = SA_SIGINFO; + action.sa_sigaction = signal_handler; + sigaction(SIGUSR1, &action, NULL); + sigaction(SIGINT, &action, NULL); + sigaction(SIGTERM, &action, NULL); + + return 0; +} \ No newline at end of file diff --git a/libos/sync/sync.c b/libos/sync/sync.c new file mode 100644 index 0000000..bb15e0f --- /dev/null +++ b/libos/sync/sync.c @@ -0,0 +1,329 @@ +#include +#include +#include +#include +#include + +#include + +/* + * Mutex support + */ + +/** + * mutex_try_lock - attempts to acquire a mutex + * @m: the mutex to acquire + * + * Returns true if the acquire was successful. + */ +bool mutex_try_lock(mutex_t *m) +{ + spin_lock_np(&m->waiter_lock); + if (m->held) { + spin_unlock_np(&m->waiter_lock); + return false; + } + m->held = true; + spin_unlock_np(&m->waiter_lock); + return true; +} + +/** + * mutex_lock - acquires a mutex + * @m: the mutex to acquire + */ +void mutex_lock(mutex_t *m) +{ + struct task *task; + + spin_lock_np(&m->waiter_lock); + task = task_self(); + if (!m->held) { + m->held = true; + spin_unlock_np(&m->waiter_lock); + return; + } + list_add_tail(&m->waiters, &task->link); + task_block(&m->waiter_lock); +} + +/** + * mutex_unlock - releases a mutex + * @m: the mutex to release + */ +void mutex_unlock(mutex_t *m) +{ + struct task *task; + + spin_lock_np(&m->waiter_lock); + task = list_pop(&m->waiters, struct task, link); + if (!task) { + m->held = false; + spin_unlock_np(&m->waiter_lock); + return; + } + spin_unlock_np(&m->waiter_lock); + task_wakeup(task); +} + +/** + * mutex_init - initializes a mutex + * @m: the mutex to initialize + */ +void mutex_init(mutex_t *m) +{ + m->held = false; + spin_lock_init(&m->waiter_lock); + list_head_init(&m->waiters); +} + +/* + * Condition variable support + */ + +/** + * condvar_wait - waits for a condition variable to be signalled + * @cv: the condition variable to wait for + * @m: the currently held mutex that projects the condition + */ +void condvar_wait(condvar_t *cv, mutex_t *m) +{ + struct task *task = task_self(); + + assert_mutex_held(m); + spin_lock_np(&cv->waiter_lock); + mutex_unlock(m); + list_add_tail(&cv->waiters, &task->link); + task_block(&cv->waiter_lock); + mutex_lock(m); +} + +/** + * condvar_signal - signals a thread waiting on a condition variable + * @cv: the condition variable to signal + */ +void condvar_signal(condvar_t *cv) +{ + struct task *task; + + spin_lock_np(&cv->waiter_lock); + task = list_pop(&cv->waiters, struct task, link); + spin_unlock_np(&cv->waiter_lock); + if (task) + task_wakeup(task); +} + +/** + * condvar_broadcast - signals all waiting threads on a condition variable 
+ * @cv: the condition variable to signal + */ +void condvar_broadcast(condvar_t *cv) +{ + struct task *task; + struct list_head tmp; + + list_head_init(&tmp); + + spin_lock_np(&cv->waiter_lock); + list_append_list(&tmp, &cv->waiters); + spin_unlock_np(&cv->waiter_lock); + + while (true) { + task = list_pop(&tmp, struct task, link); + if (!task) + break; + task_wakeup(task); + } +} + +/** + * condvar_init - initializes a condition variable + * @cv: the condition variable to initialize + */ +void condvar_init(condvar_t *cv) +{ + spin_lock_init(&cv->waiter_lock); + list_head_init(&cv->waiters); +} + +/* + * Barrier support + */ + +/** + * barrier_init - initializes a barrier + * @b: the wait group to initialize + * @count: number of threads that must wait before releasing + */ +void barrier_init(barrier_t *b, int count) +{ + spin_lock_init(&b->lock); + list_head_init(&b->waiters); + b->count = count; + b->waiting = 0; +} + +/** + * barrier_wait - waits on a barrier + * @b: the barrier to wait on + * + * Returns true if the calling thread releases the barrier + */ +bool barrier_wait(barrier_t *b) +{ + struct task *task; + struct list_head tmp; + + list_head_init(&tmp); + + spin_lock_np(&b->lock); + + if (++b->waiting >= b->count) { + list_append_list(&tmp, &b->waiters); + b->waiting = 0; + spin_unlock_np(&b->lock); + while (true) { + task = list_pop(&tmp, struct task, link); + if (!task) + break; + task_wakeup(task); + } + return true; + } + + task = task_self(); + list_add_tail(&b->waiters, &task->link); + task_block(&b->lock); + return false; +} + +/* + * Wait group support + */ + +/** + * waitgroup_add - adds or removes waiters from a wait group + * @wg: the wait group to update + * @cnt: the count to add to the waitgroup (can be negative) + * + * If the wait groups internal count reaches zero, the waiting thread (if it + * exists) will be signalled. The wait group must be incremented at least once + * before calling waitgroup_wait(). 
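+ * When the count reaches zero, every task blocked in waitgroup_wait() is woken.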
+ */ +void waitgroup_add(waitgroup_t *wg, int cnt) +{ + struct task *task; + struct list_head tmp; + + list_head_init(&tmp); + + spin_lock_np(&wg->lock); + wg->cnt += cnt; + BUG_ON(wg->cnt < 0); + if (wg->cnt == 0) + list_append_list(&tmp, &wg->waiters); + spin_unlock_np(&wg->lock); + + while (true) { + task = list_pop(&tmp, struct task, link); + if (!task) + break; + task_wakeup(task); + } +} + +/** + * waitgroup_wait - waits for the wait group count to become zero + * @wg: the wait group to wait on + */ +void waitgroup_wait(waitgroup_t *wg) +{ + struct task *task; + + spin_lock_np(&wg->lock); + task = task_self(); + if (wg->cnt == 0) { + spin_unlock_np(&wg->lock); + return; + } + list_add_tail(&wg->waiters, &task->link); + task_block(&wg->lock); +} + +/** + * waitgroup_init - initializes a wait group + * @wg: the wait group to initialize + */ +void waitgroup_init(waitgroup_t *wg) +{ + spin_lock_init(&wg->lock); + list_head_init(&wg->waiters); + wg->cnt = 0; +} + +struct futex { + int *uaddr; + struct list_node link; + struct task *task; +}; + +DEFINE_LIST_HEAD(futex_list); +DEFINE_SPINLOCK(futex_lock); + +int futex_wait(int *uaddr, int val) +{ + struct futex futex = {.uaddr = uaddr, .task = task_self()}; + + spin_lock_np(&futex_lock); + if (*uaddr != val) { + spin_unlock_np(&futex_lock); + return -EAGAIN; + } + + list_add_tail(&futex_list, &futex.link); + task_block(&futex_lock); + return 0; +} + +int futex_wake(int *uaddr, int val) +{ + int count = 0; + struct futex *f; + + spin_lock_np(&futex_lock); + list_for_each(&futex_list, f, link) + { + if (f->uaddr == uaddr) { + list_del(&f->link); + task_wakeup(f->task); + if (++count == val) + break; + } + } + spin_unlock_np(&futex_lock); + return count; +} + +int __api sl_futex(int *uaddr, int op, int val, const struct timespec *timeout, int *uaddr2, + int val3) +{ + int cmd = op & FUTEX_CMD_MASK; + switch (cmd) { + case FUTEX_WAIT: + if (timeout) { + log_warn("FUTEX_WAIT with timeout is not supported"); + } + int ret = futex_wait(uaddr, val); + return ret; + case FUTEX_WAKE: + return futex_wake(uaddr, val); + default: + return -1; + } +} + +void init_sync() +{ + spin_lock_init(&futex_lock); + list_head_init(&futex_list); +} diff --git a/libos/sync/timer.c b/libos/sync/timer.c new file mode 100644 index 0000000..58a02fc --- /dev/null +++ b/libos/sync/timer.c @@ -0,0 +1,340 @@ +/* + * timer.c - support for timers + * + * So far we use a D-ary heap just like the Go runtime. We may want to consider + * adding a lower-resolution shared timer wheel as well. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* the arity of the heap */ +#define D 4 + +/** + * is_valid_heap - checks that the timer heap is a valid min heap + * @heap: the timer heap + * @n: the number of timers in the heap + * + * Returns true if valid, false otherwise. 
+ */ +static bool is_valid_heap(struct timer_idx *heap, int n) +{ + int i, p; + + /* check that each timer's deadline is later or equal to its parent's + * deadline */ + for (i = n - 1; i > 1; i--) { + p = (i - 1) / D; + if (heap[p].deadline_us > heap[i].deadline_us) + return false; + } + + return true; +} + +/** + * timer_heap_is_valid - checks that this kthread's timer heap is a + * valid min heap + * @k: the kthread + */ +static void assert_timer_heap_is_valid(struct kthread *k) +{ + assert(is_valid_heap(k->timers, k->nr_timers)); +} + +static void sift_up(struct timer_idx *heap, int i) +{ + struct timer_idx tmp = heap[i]; + int p; + + while (i > 0) { + p = (i - 1) / D; + if (tmp.deadline_us >= heap[p].deadline_us) + break; + heap[i] = heap[p]; + heap[i].e->idx = i; + heap[p] = tmp; + heap[p].e->idx = p; + i = p; + } +} + +static void sift_down(struct timer_idx *heap, int i, int n) +{ + struct timer_idx tmp = heap[i]; + uint64_t w; + int c, j; + + while (1) { + w = tmp.deadline_us; + c = INT_MAX; + for (j = (i * D + 1); j <= (i * D + D); j++) { + if (j >= n) + break; + if (heap[j].deadline_us < w) { + w = heap[j].deadline_us; + c = j; + } + } + if (c == INT_MAX) + break; + heap[i] = heap[c]; + heap[i].e->idx = i; + heap[c] = tmp; + heap[c].e->idx = c; + i = c; + } +} + +/** + * timer_merge - merges a timer heap from another kthread into our timer heap + * @r: the remote kthread whose timer heap we will absorb + */ +void timer_merge(struct kthread *r) +{ + struct kthread *k = thisk(); + int i; + + spin_lock(&k->timer_lock); + spin_lock(&r->timer_lock); + + if (r->nr_timers == 0) { + spin_unlock(&r->timer_lock); + goto done; + } + + /* move all timers from r to the end of our array */ + for (i = 0; i < r->nr_timers; i++) { + k->timers[k->nr_timers] = r->timers[i]; + k->timers[k->nr_timers].e->idx = k->nr_timers; + k->timers[k->nr_timers].e->k = k; + k->nr_timers++; + + if (k->nr_timers >= MAX_TIMERS) + BUG(); + } + r->nr_timers = 0; + spin_unlock(&r->timer_lock); + + /* + * Restore heap order by sifting each non-leaf element downward, + * starting from the bottom of the heap and working upward (runs in + * linear time). + */ + for (i = k->nr_timers / D; i >= 0; i--) sift_down(k->timers, i, k->nr_timers); + +done: + spin_unlock(&k->timer_lock); +} + +/** + * timer_earliest_deadline - return the first deadline for this kthread or 0 if + * there are no active timers. + */ +uint64_t timer_earliest_deadline() +{ + struct kthread *k = thisk(); + uint64_t deadline_us; + + /* deliberate race condition */ + if (k->nr_timers == 0) + deadline_us = 0; + else + deadline_us = k->timers[0].deadline_us; + + return deadline_us; +} + +static void timer_start_locked(struct timer_entry *e, uint64_t deadline_us) +{ + struct kthread *k = thisk(); + int i; + + assert_spin_lock_held(&k->timer_lock); + + /* can't insert a timer twice! */ + BUG_ON(e->armed); + + i = k->nr_timers++; + if (k->nr_timers >= MAX_TIMERS) { + /* TODO: support unlimited timers */ + BUG(); + } + + k->timers[i].deadline_us = deadline_us; + k->timers[i].e = e; + e->idx = i; + e->k = k; + sift_up(k->timers, i); + e->armed = true; +} + +/** + * timer_start - arms a timer + * @e: the timer entry to start + * @deadline_us: the deadline in microseconds + * + * @e must have been initialized with timer_init(). 
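+ * The entry is inserted into the calling kthread's timer heap while @timer_lock is held.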
+ */ +void timer_start(struct timer_entry *e, uint64_t deadline_us) +{ + struct kthread *k = thisk(); + + spin_lock_np(&k->timer_lock); + timer_start_locked(e, deadline_us); + spin_unlock_np(&k->timer_lock); + putk(); +} + +/** + * timer_cancel - cancels a timer + * @e: the timer entry to cancel + * + * Returns true if the timer was successfully cancelled, otherwise it has + * already fired or was never armed. + */ +bool timer_cancel(struct timer_entry *e) +{ + struct kthread *k; + uint32_t last; + +try_again: + preempt_disable(); + k = atomic_load_acq(&e->k); + + spin_lock_np(&k->timer_lock); + + if (e->k != k) { + /* Timer was merged to a different heap */ + spin_unlock_np(&k->timer_lock); + preempt_enable(); + goto try_again; + } + + if (!e->armed) { + spin_unlock_np(&k->timer_lock); + preempt_enable(); + return false; + } + e->armed = false; + + last = --k->nr_timers; + if (e->idx == last) { + spin_unlock_np(&k->timer_lock); + preempt_enable(); + return true; + } + + k->timers[e->idx] = k->timers[last]; + k->timers[e->idx].e->idx = e->idx; + sift_up(k->timers, e->idx); + sift_down(k->timers, e->idx, k->nr_timers); + spin_unlock_np(&k->timer_lock); + + preempt_enable(); + return true; +} + +static void timer_finish_sleep(unsigned long arg) { task_wakeup((struct task *)arg); } + +static void __timer_sleep(uint64_t deadline_us) +{ +#ifdef SKYLOFT_TIMER + struct kthread *k = thisk(); + struct timer_entry e; + + timer_init(&e, timer_finish_sleep, (unsigned long)task_self()); + + spin_lock_np(&k->timer_lock); + timer_start_locked(&e, deadline_us); + task_block(&k->timer_lock); +#else + while (now_us() < deadline_us) { + task_yield(); + } +#endif +} + +/** + * timer_sleep_until - sleeps until a deadline + * @deadline_us: the deadline time in microseconds + */ +void timer_sleep_until(uint64_t deadline_us) +{ + if (unlikely(now_us() >= deadline_us)) + return; + + __timer_sleep(deadline_us); +} + +/** + * timer_sleep - sleeps for a duration + * @duration_us: the duration time in microseconds + */ +void timer_sleep(uint64_t duration_us) { __timer_sleep(now_us() + duration_us); } + +void __api sl_sleep(int secs) { return timer_sleep(secs * USEC_PER_SEC); } + +void __api sl_usleep(int usecs) { return timer_sleep(usecs); } + +/** + * timer_softirq - handles expired timers + * @k: the kthread to check + * @budget: the maximum number of timers to handle + */ +void timer_softirq(struct kthread *k, unsigned int budget) +{ + struct timer_entry *e; + uint64_t now; + int i; + + spin_lock_np(&k->timer_lock); + assert_timer_heap_is_valid(k); + + now = now_us(); + while ((budget-- > 0) && k->nr_timers > 0 && k->timers[0].deadline_us <= now) { + i = --k->nr_timers; + e = k->timers[0].e; + if (i > 0) { + k->timers[0] = k->timers[i]; + k->timers[0].e->idx = 0; + sift_down(k->timers, 0, i); + } + spin_unlock_np(&k->timer_lock); + + /* execute the timer handler */ + e->fn(e->arg); + + spin_lock_np(&k->timer_lock); + now = now_us(); + } + + spin_unlock_np(&k->timer_lock); +} + +/** + * timer_init_percpu - initializes percpu timer state + * + * Returns 0 if successful, otherwise fail. 
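+ * Allocates a cache-line-aligned array of MAX_TIMERS timer slots for the calling kthread.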
+ */ +int timer_init_percpu(void) +{ + struct kthread *k = thisk(); + + k->timers = aligned_alloc(CACHE_LINE_SIZE, + align_up(sizeof(struct timer_idx) * MAX_TIMERS, CACHE_LINE_SIZE)); + if (!k->timers) + return -ENOMEM; + + return 0; +} diff --git a/microbench/.gitignore b/microbench/.gitignore new file mode 100644 index 0000000..c06c6e9 --- /dev/null +++ b/microbench/.gitignore @@ -0,0 +1,7 @@ +uipi_send_recv +uipi_delivery +utimer_recv +signal_send_recv +signal_delivery +setitimer_recv +kipi_send_recv diff --git a/microbench/Makefile b/microbench/Makefile new file mode 100644 index 0000000..9a3ed50 --- /dev/null +++ b/microbench/Makefile @@ -0,0 +1,15 @@ +CC := gcc + +CFLAGS += -Wall -g -D_GNU_SOURCE -DSKYLOFT_UINTR -muintr -O3 +CFLAGS += -I../utils/include -I../include +LDLIBS += -lpthread + +TARGETS := uipi_send_recv uipi_delivery utimer_recv signal_send_recv signal_delivery setitimer_recv kipi_send_recv + +all: $(TARGETS) + +%: %.c + $(CC) $(CFLAGS) -o $@ $^ $(LDLIBS) + +clean: + rm -rf $(TARGETS) diff --git a/microbench/common.h b/microbench/common.h new file mode 100644 index 0000000..ee8a9af --- /dev/null +++ b/microbench/common.h @@ -0,0 +1,9 @@ +#pragma once + +#define DIV_ROUND(sum, count) (((sum) + (count) / 2) / (count)) + +static inline void dummy_work(uint64_t iter) { + while (iter--) { + asm volatile("nop"); + } +} diff --git a/microbench/kipi_send_recv.c b/microbench/kipi_send_recv.c new file mode 100644 index 0000000..56fafe1 --- /dev/null +++ b/microbench/kipi_send_recv.c @@ -0,0 +1,122 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "common.h" + +#define MASTER_CPU 2 +#define SLAVE_CPU 3 + +#define WARMUP 10000 +#define ITER 10000000 +#define WORKS 2e9 +#define UVEC 1 + +#define now now_tsc + +static uint64_t kipi_recv_lat = 0; +static atomic_int barrier = 0; + +static volatile int counter = 0; +static volatile int init_ok = 0; +static volatile int send_ok = 0; +static int uintr_fd; +static int uintr_index; +static int dev_fd; + +static void __attribute__((interrupt)) +__attribute__((target("general-regs-only", "inline-all-stringops"))) +uintr_handler(struct __uintr_frame *ui_frame, unsigned long long vector) +{ + asm volatile("" : : :"rdx", "rcx", "rsi", "rdi", + "r8", "r9", "r10", "r11"); + counter++; +} + +static void *sender_thread(void *arg) +{ + bind_to_cpu(SLAVE_CPU); + printf("sender run on CPU: %d\n", sched_getcpu()); + + atomic_fetch_add(&barrier, 1); + while (atomic_load(&barrier) != 2); + + ioctl(dev_fd, SKYLOFT_IO_IPI_BENCH, MASTER_CPU); + + atomic_fetch_add(&barrier, 1); + while (atomic_load(&barrier) != 4); + + return NULL; +} + +int main(int argc, char **argv) +{ + int ret = 0; + + // Create the sender thread before binding CPU. 
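+ // The sender spins on the shared atomic barrier, so the SKYLOFT_IO_IPI_BENCH ioctl targeting MASTER_CPU is issued only after the receiver below has bound its CPU, opened /dev/skyloft, and registered its user-interrupt handler.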
+ pthread_t sender; + pthread_create(&sender, NULL, sender_thread, NULL); + + bind_to_cpu(MASTER_CPU); + printf("run on CPU: %d\n", sched_getcpu()); + + uint64_t work_time = now(); + dummy_work(WORKS); + work_time = now() - work_time; + printf("work time = %ld cycles\n", work_time); + + dev_fd = open("/dev/skyloft", O_RDWR); + if (dev_fd < 0) { + printf("Failed to open the device\n"); + exit(EXIT_FAILURE); + } + + ret = uintr_register_handler(uintr_handler, 0); + if (ret < 0) { + printf("Failed to register a handler %d\n", ret); + exit(EXIT_FAILURE); + } + + uintr_fd = uintr_vector_fd(UVEC, 0); + if (uintr_fd < 0) { + printf("Failed to create a vector\n"); + exit(EXIT_FAILURE); + } + + uintr_index = uintr_register_sender(uintr_fd, 0); + if (uintr_index < 0) { + printf("Failed to register a sender %d\n", uintr_index); + exit(EXIT_FAILURE); + } + + atomic_fetch_add(&barrier, 1); + while (atomic_load(&barrier) != 2); + + kipi_recv_lat = now_tsc(); + dummy_work(WORKS); + kipi_recv_lat = now_tsc() - kipi_recv_lat - work_time; + + atomic_fetch_add(&barrier, 1); + while (atomic_load(&barrier) != 4); + + printf("kipi recv total latency = %ld cycles\n", kipi_recv_lat); + + /** clean up */ + uintr_unregister_handler(0); + + return 0; +} diff --git a/microbench/setitimer_recv.c b/microbench/setitimer_recv.c new file mode 100644 index 0000000..beab841 --- /dev/null +++ b/microbench/setitimer_recv.c @@ -0,0 +1,69 @@ +#include +#include +#include +#include +#include + +#include +#include + +#include "common.h" + +#define MASTER_CPU 25 + +#define ITER 1000000 +#define WORKS 2e9 + +#define now now_tsc + +static volatile int recv_count = 0; + +static uint64_t recv_lat; + +void signal_handler() +{ + recv_count++; +} + +void setup_signal_handler(void) +{ + struct sigaction action; + action.sa_flags = 0; + action.sa_handler = signal_handler; + sigaction(SIGALRM, &action, NULL); + + struct itimerval timer; + timer.it_interval.tv_sec = 0; + timer.it_interval.tv_usec = 5; + timer.it_value.tv_sec = 0; + timer.it_value.tv_usec = 1; + setitimer(ITIMER_REAL, &timer, NULL); +} + +void clear_signal_handler(void) +{ + sigaction(SIGALRM, NULL, NULL); +} + +int main() +{ + bind_to_cpu(MASTER_CPU); + printf("run on CPU: %d\n", sched_getcpu()); + + uint64_t work_time = now(); + dummy_work(WORKS); + work_time = now() - work_time; + printf("work time = %ld cycles\n", work_time); + + setup_signal_handler(); + + recv_lat = now(); + dummy_work(WORKS); + recv_lat = now() - recv_lat - work_time; + clear_signal_handler(); + + printf("count = %d\n", recv_count); + printf("recv latency = %ld cycles\n", DIV_ROUND(recv_lat, recv_count)); + + return 0; +} diff --git a/microbench/signal_delivery.c b/microbench/signal_delivery.c new file mode 100644 index 0000000..4acbcce --- /dev/null +++ b/microbench/signal_delivery.c @@ -0,0 +1,85 @@ +#include +#include +#include +#include + +#include +#include + +#include "common.h" + +#define MASTER_CPU 25 +#define SLAVE_CPU 26 + +#define ITER 1000000 + +#define now now_tsc + +static volatile int recv_count = 0; +static volatile int init_ok = 0; +static volatile int send_ok = 0; + +static uint64_t time_before_send, send_recv_lat; +static pid_t receiver_tid; + +void signal_handler() +{ + send_recv_lat += now() - time_before_send; + recv_count++; +} + +void setup_signal_handler(void) +{ + struct sigaction action; + action.sa_flags = 0; + action.sa_handler = signal_handler; + sigaction(SIGUSR1, &action, NULL); +} + +static inline void do_send(int iter, pid_t target) +{ + for (int i = 0; i < iter; i++) 
{ + time_before_send = now(); + kill(target, SIGUSR1); + while (recv_count <= i); + } +} + +static void *sender_thread(void *arg) +{ + bind_to_cpu(SLAVE_CPU); + printf("sender %d run on CPU: %d\n", _gettid(), sched_getcpu()); + + sigset_t signal_set; + sigemptyset(&signal_set); + sigaddset(&signal_set, SIGUSR1); + sigprocmask(SIG_BLOCK, &signal_set, NULL); + + while (!init_ok); + + do_send(ITER, receiver_tid); + send_ok = 1; + + return NULL; +} + +int main() +{ + // Create the sender thread before binding CPU. + pthread_t sender; + pthread_create(&sender, NULL, sender_thread, NULL); + + bind_to_cpu(MASTER_CPU); + receiver_tid = _gettid(); + printf("receiver %d run on CPU: %d\n", receiver_tid, sched_getcpu()); + + setup_signal_handler(); + init_ok = 1; + + while (recv_count < ITER); + + printf("sent %d signals, received %d signals\n", ITER, recv_count); + printf("send-recv latency = %ld cycles\n", DIV_ROUND(send_recv_lat, recv_count)); + + return 0; +} diff --git a/microbench/signal_send_recv.c b/microbench/signal_send_recv.c new file mode 100644 index 0000000..68c2a54 --- /dev/null +++ b/microbench/signal_send_recv.c @@ -0,0 +1,95 @@ +#include +#include +#include +#include + +#include +#include + +#include "common.h" + +#define MASTER_CPU 25 +#define SLAVE_CPU 26 + +#define ITER 1000000 +#define WORKS 2e9 + +#define now now_tsc + +static volatile int recv_count = 0; +static volatile int init_ok = 0; +static volatile int send_ok = 0; + +static uint64_t send_lat, recv_lat; +static pid_t receiver_tid; + +void signal_handler() +{ + recv_count++; +} + +void setup_signal_handler(void) +{ + struct sigaction action; + action.sa_flags = 0; + action.sa_handler = signal_handler; + sigaction(SIGUSR1, &action, NULL); +} + +static inline void do_send(int iter, pid_t target) +{ + for (int i = 0; i < iter; i++) { + kill(target, SIGUSR1); + } +} + +static void *sender_thread(void *arg) +{ + bind_to_cpu(SLAVE_CPU); + printf("sender %d run on CPU: %d\n", _gettid(), sched_getcpu()); + + sigset_t signal_set; + sigemptyset(&signal_set); + sigaddset(&signal_set, SIGUSR1); + sigprocmask(SIG_BLOCK, &signal_set, NULL); + + while (!init_ok); + + send_lat = now(); + do_send(ITER, receiver_tid); + send_lat = now() - send_lat; + send_ok = 1; + + return NULL; +} + +int main() +{ + // Create the sender thread before binding CPU. 
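+ // The sender blocks SIGUSR1 in its own thread and spins on init_ok, so the kill() loop only targets this receiver thread after it has bound its CPU and installed the SIGUSR1 handler.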
+ pthread_t sender; + pthread_create(&sender, NULL, sender_thread, NULL); + + bind_to_cpu(MASTER_CPU); + receiver_tid = _gettid(); + printf("receiver %d run on CPU: %d\n", receiver_tid, sched_getcpu()); + + uint64_t work_time = now(); + dummy_work(WORKS); + work_time = now() - work_time; + printf("work time = %ld cycles\n", work_time); + + setup_signal_handler(); + init_ok = 1; + + recv_lat = now(); + dummy_work(WORKS); + recv_lat = now() - recv_lat - work_time; + + assert(send_ok); + + printf("sent %d signals, received %d signals\n", ITER, recv_count); + printf("send latency = %ld cycles\n", DIV_ROUND(send_lat, ITER)); + printf("recv latency = %ld cycles\n", DIV_ROUND(recv_lat, recv_count)); + + return 0; +} diff --git a/microbench/uipi_delivery.c b/microbench/uipi_delivery.c new file mode 100644 index 0000000..d7a479c --- /dev/null +++ b/microbench/uipi_delivery.c @@ -0,0 +1,123 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "common.h" + +#define MASTER_CPU 2 +#define SLAVE_CPU 3 + +#define WARMUP 10000 +#define ITER 1000000 +#define UVEC 1 + +static uint64_t time_before_send; +static uint64_t send_recv_lat = 0; + +static volatile int counter = 0; +static volatile int init_ok = 0; +static int uintr_fd; +static int uintr_index; +static volatile int dev_fd; + +static void __attribute__((interrupt)) +__attribute__((target("general-regs-only", "inline-all-stringops"))) +uintr_handler(struct __uintr_frame *ui_frame, unsigned long long vector) +{ + if (vector == UVEC) { + send_recv_lat += now_tsc() - time_before_send; + counter++; + } +} + + +static void *sender_thread(void *arg) +{ + bind_to_cpu(SLAVE_CPU); + printf("Sender runs on %d\n", sched_getcpu()); + + while (!init_ok); + + for (int i = 0; i < ITER; i++) { + time_before_send = now_tsc(); + _senduipi(uintr_index); + while (counter <= i); + } + + return NULL; +} + +static void bench_rdtsc() +{ + uint64_t sum = 0; + for (int i = 0; i < ITER; i++) { + uint64_t a = now_tsc(); + uint64_t b = now_tsc(); + sum += b - a; + } + printf("rdtsc latency = %ld cycles\n", DIV_ROUND(sum, ITER)); +} + +int main(int argc, char **argv) +{ + int ret = 0; + + // Create the sender thread before binding CPU. 
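+ // The sender spins on init_ok, so it starts timestamping _senduipi calls only after the receiver below has registered the handler, the vector, and the sender index.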
+ pthread_t sender; + pthread_create(&sender, NULL, sender_thread, NULL); + + bind_to_cpu(MASTER_CPU); + printf("Receiver runs on CPU: %d\n", sched_getcpu()); + + dev_fd = open("/dev/skyloft", O_RDWR); + if (dev_fd < 0) { + printf("Failed to open the device\n"); + exit(EXIT_FAILURE); + } + + bench_rdtsc(); + + ret = uintr_register_handler(uintr_handler, 0); + if (ret < 0) { + printf("Failed to register a handler %d\n", ret); + exit(EXIT_FAILURE); + } + + uintr_fd = uintr_vector_fd(UVEC, 0); + if (uintr_fd < 0) { + printf("Failed to create a vector\n"); + exit(EXIT_FAILURE); + } + + uintr_index = uintr_register_sender(uintr_fd, 0); + if (uintr_index < 0) { + printf("Failed to register a sender %d\n", uintr_index); + exit(EXIT_FAILURE); + } + + init_ok = 1; + + _stui(); + while (counter < ITER); + + send_recv_lat = DIV_ROUND(send_recv_lat, ITER); + printf("count = %d\n", counter); + printf("delivery latency = %ld cycles\n", send_recv_lat); + + /** clean up */ + uintr_unregister_handler(0); + + return 0; +} diff --git a/microbench/uipi_send_recv.c b/microbench/uipi_send_recv.c new file mode 100644 index 0000000..fa9ca46 --- /dev/null +++ b/microbench/uipi_send_recv.c @@ -0,0 +1,139 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "common.h" + +#define MASTER_CPU 2 +#define SLAVE_CPU 3 + +#define WARMUP 10000 +#define ITER 10000000 +#define WORKS 2e9 +#define UVEC 1 + +#define now now_tsc + +static uint64_t send_lat = 0; +static uint64_t recv_lat = 0; + +static volatile int counter = 0; +static volatile int init_ok = 0; +static volatile int send_ok = 0; +static int uintr_fd; +static int uintr_index; +static int dev_fd; + +static void __attribute__((interrupt)) +__attribute__((target("general-regs-only", "inline-all-stringops"))) +uintr_handler(struct __uintr_frame *ui_frame, unsigned long long vector) +{ + asm volatile("" : : :"rdx", "rcx", "rsi", "rdi", + "r8", "r9", "r10", "r11"); + counter++; +} + +static void *sender_thread(void *arg) +{ + bind_to_cpu(SLAVE_CPU); + printf("sender run on CPU: %d\n", sched_getcpu()); + + while (!init_ok); + + send_lat = now(); + for (int i = 0; i < ITER; i++) { + _senduipi(uintr_index); + } + send_lat = now() - send_lat; + send_ok = 1; + + return NULL; +} + +static void bench_rdtsc() +{ + uint64_t sum = 0; + for (int i = 0; i < ITER; i++) { + uint64_t a = now_tsc(); + uint64_t b = now_tsc(); + sum += b - a; + } + printf("rdtsc latency = %ld cycles\n", DIV_ROUND(sum, ITER)); +} + +int main(int argc, char **argv) +{ + int ret = 0; + + // Create the sender thread before binding CPU. 
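+ // The sender spins on init_ok and then issues ITER back-to-back _senduipi calls, while the receiver measures the extra time they add to the calibrated dummy_work() loop.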
+ pthread_t sender; + pthread_create(&sender, NULL, sender_thread, NULL); + + bind_to_cpu(MASTER_CPU); + printf("run on CPU: %d\n", sched_getcpu()); + + uint64_t work_time = now(); + dummy_work(WORKS); + work_time = now() - work_time; + printf("work time = %ld cycles\n", work_time); + + dev_fd = open("/dev/skyloft", O_RDWR); + if (dev_fd < 0) { + printf("Failed to open the device\n"); + exit(EXIT_FAILURE); + } + + bench_rdtsc(); + + ret = uintr_register_handler(uintr_handler, 0); + if (ret < 0) { + printf("Failed to register a handler %d\n", ret); + exit(EXIT_FAILURE); + } + + uintr_fd = uintr_vector_fd(UVEC, 0); + if (uintr_fd < 0) { + printf("Failed to create a vector\n"); + exit(EXIT_FAILURE); + } + + uintr_index = uintr_register_sender(uintr_fd, 0); + if (uintr_index < 0) { + printf("Failed to register a sender %d\n", uintr_index); + exit(EXIT_FAILURE); + } + + init_ok = 1; + + _stui(); + recv_lat = now(); + dummy_work(WORKS); + recv_lat = now() - recv_lat - work_time; + _clui(); + + assert(send_ok); + + printf("count = %d\n", counter); + send_lat = DIV_ROUND(send_lat, ITER); + recv_lat = DIV_ROUND(recv_lat, counter); + printf("send latency = %ld cycles\n", send_lat); + printf("recv latency = %ld cycles\n", recv_lat); + + /** clean up */ + uintr_unregister_handler(0); + + return 0; +} diff --git a/microbench/utimer_recv.c b/microbench/utimer_recv.c new file mode 100644 index 0000000..2508726 --- /dev/null +++ b/microbench/utimer_recv.c @@ -0,0 +1,100 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "common.h" + +#define MASTER_CPU 2 + +#define RUN_TIME_MS 1000 // 1s +#define TIMER_HZ 1000000 // 1us +#define WORKS 2e9 +#define UVEC 1 + +#define now now_tsc + +static uint64_t recv_lat = 0; +static volatile int counter = 0; + +static int dev_fd; +static int uintr_index; + +static void __attribute__((interrupt)) +__attribute__((target("general-regs-only", "inline-all-stringops"))) +uintr_handler(struct __uintr_frame *ui_frame, unsigned long long vector) +{ + _senduipi(uintr_index); + asm volatile("" : : :"rdx", "rcx", "rsi", "rdi", + "r8", "r9", "r10", "r11"); + counter++; +} + +int main(int argc, char **argv) +{ + int ret = 0; + + bind_to_cpu(MASTER_CPU); + printf("run on CPU: %d\n", sched_getcpu()); + + uint64_t work_time = now(); + dummy_work(WORKS); + work_time = now() - work_time; + printf("work time = %ld cycles\n", work_time); + + ret = uintr_register_handler(uintr_handler, 0); + if (ret < 0) { + printf("Failed to register a handler %d\n", ret); + exit(EXIT_FAILURE); + } + + uintr_index = uintr_register_self(UVEC, 0); + if (uintr_index < 0) { + printf("Failed to register the sender for self\n"); + exit(EXIT_FAILURE); + } + dev_fd = open("/dev/skyloft", O_RDWR); + if (dev_fd < 0) { + printf("Failed to open the device\n"); + exit(EXIT_FAILURE); + } + + ret = ioctl(dev_fd, SKYLOFT_IO_SETUP_DEVICE_UINTR, 1); // flags=1: bind timer + if (ret < 0) { + perror("Failed to register skyloft\n"); + exit(EXIT_FAILURE); + } + + ret = ioctl(dev_fd, SKYLOFT_IO_TIMER_SET_HZ, TIMER_HZ); + if (ret < 0) { + perror("Failed to set timer frequency\n"); + exit(EXIT_FAILURE); + } + + _senduipi(uintr_index); + _stui(); + recv_lat = now(); + dummy_work(WORKS); + recv_lat = now() - recv_lat - work_time; + _clui(); + + printf("count = %d\n", counter); + recv_lat = DIV_ROUND(recv_lat, counter); + printf("recv latency = %ld cycles\n", recv_lat); + + /** clean up */ + uintr_unregister_handler(0); + + return 
0; +} diff --git a/paper_results/memcached/USR/shenango b/paper_results/memcached/USR/shenango new file mode 100644 index 0000000..f834590 --- /dev/null +++ b/paper_results/memcached/USR/shenango @@ -0,0 +1,30 @@ +zero, 100277, 100277, 0, 47, 15.0, 17.2, 21.9, 28.5, 32.0, 1713380539 +zero, 200630, 200630, 0, 61, 14.9, 17.1, 25.2, 29.7, 33.1, 1713380550 +zero, 301433, 301433, 0, 98, 14.9, 18.4, 27.8, 33.0, 52.3, 1713380562 +zero, 399838, 399838, 0, 375, 15.0, 19.2, 29.5, 34.9, 40.9, 1713380575 +zero, 500633, 500633, 0, 139, 15.2, 20.1, 31.4, 37.3, 42.6, 1713380589 +zero, 600199, 600199, 0, 146, 15.6, 21.3, 32.7, 39.7, 81.3, 1713380604 +zero, 699079, 699079, 0, 169, 16.2, 22.6, 34.0, 41.3, 50.5, 1713380620 +zero, 798837, 798837, 0, 220, 16.9, 24.4, 35.5, 43.7, 78.6, 1713380637 +zero, 900088, 900088, 0, 230, 17.1, 25.7, 36.6, 45.8, 94.3, 1713380654 +zero, 999462, 999462, 0, 272, 17.4, 26.9, 38.0, 47.3, 57.4, 1713380673 +zero, 1100067, 1100067, 0, 268, 17.5, 27.5, 39.7, 50.1, 142.7, 1713380692 +zero, 1200338, 1200338, 0, 760, 17.7, 27.8, 40.5, 51.0, 1180.3, 1713380712 +zero, 1301613, 1301613, 0, 393, 18.4, 28.9, 43.3, 54.7, 71.3, 1713380734 +zero, 1400619, 1400619, 0, 335, 18.3, 28.1, 43.0, 54.1, 67.8, 1713382749 +zero, 1500380, 1500380, 0, 868, 19.0, 28.7, 45.1, 58.2, 3303.2, 1713380779 +zero, 1598894, 1598894, 0, 1153, 19.1, 29.1, 46.2, 61.7, 2244.7, 1713380803 +zero, 1698703, 1698703, 0, 1448, 19.8, 30.1, 46.9, 63.1, 2034.6, 1713380828 +zero, 1803139, 1803139, 0, 627, 21.0, 32.3, 50.6, 68.5, 85.2, 1713380854 +zero, 1900324, 1900324, 0, 689, 21.4, 32.7, 52.1, 73.5, 100.1, 1713380881 +zero, 2000160, 2000160, 0, 716, 22.7, 34.9, 55.2, 82.5, 128.5, 1713380908 +zero, 2100638, 2100638, 0, 798, 23.7, 37.1, 59.8, 87.6, 126.8, 1713380937 +zero, 2199073, 2199073, 0, 808, 24.8, 41.2, 68.3, 97.0, 130.9, 1713380967 +zero, 2298869, 2298869, 0, 959, 27.5, 51.2, 92.0, 131.0, 164.8, 1713380997 +zero, 2395115, 2395115, 0, 1687, 29.8, 73.8, 136.4, 194.1, 262.7, 1713383023 +zero, 2498092, 2498092, 0, 1963, 36.2, 152.6, 295.6, 427.7, 578.4, 1713381060 +zero, 2593320, 2593320, 0, 3217, 42.6, 263.9, 488.0, 712.8, 1096.9, 1713381093 +zero, 2692386, 2692386, 0, 5017, 80.0, 381.2, 726.1, 1019.2, 1372.4, 1713381127 +zero, 2790434, 2790434, 0, 6296, 90.8, 504.6, 1051.2, 1626.7, 2077.8, 1713383158 +zero, 2890676, 2890676, 0, 8951, 162.8, 689.5, 1411.2, 2309.7, 3877.4, 1713383195 +zero, 2981539, 2981539, 0, 15302, 181.2, 802.7, 1720.7, 2923.3, 4160.9, 1713383232 diff --git a/paper_results/memcached/USR/skyloft b/paper_results/memcached/USR/skyloft new file mode 100644 index 0000000..91927cf --- /dev/null +++ b/paper_results/memcached/USR/skyloft @@ -0,0 +1,30 @@ +zero, 100464, 100464, 0, 38, 13.2, 14.7, 16.9, 19.1, 25.7, 1713381513 +zero, 199682, 199682, 0, 54, 13.3, 14.7, 17.2, 19.8, 26.5, 1713381524 +zero, 300152, 300152, 0, 80, 13.3, 14.6, 17.2, 19.6, 23.6, 1713381536 +zero, 399095, 399095, 0, 152, 13.4, 15.1, 17.7, 20.7, 1296.3, 1713381558 +zero, 499187, 499187, 0, 149, 13.6, 15.5, 18.7, 21.5, 26.3, 1713381572 +zero, 600656, 600656, 0, 138, 13.8, 16.0, 19.0, 21.7, 27.8, 1713381587 +zero, 700555, 700555, 0, 192, 13.9, 16.2, 19.3, 22.4, 31.6, 1713381603 +zero, 799348, 799348, 0, 184, 14.2, 16.7, 20.0, 23.3, 27.7, 1713381620 +zero, 901277, 901277, 0, 253, 14.4, 17.1, 20.7, 24.6, 32.8, 1713381638 +zero, 998875, 998875, 0, 266, 14.6, 17.5, 21.3, 25.3, 30.0, 1713381656 +zero, 1100173, 1100173, 0, 312, 15.1, 18.3, 22.4, 27.0, 32.6, 1713381676 +zero, 1200628, 1200628, 0, 335, 15.5, 18.9, 23.3, 27.9, 32.8, 1713381697 +zero, 
1297830, 1297830, 0, 410, 15.8, 19.4, 24.3, 29.8, 36.5, 1713381718 +zero, 1398593, 1398593, 0, 420, 16.1, 20.0, 25.5, 32.3, 40.5, 1713381740 +zero, 1498855, 1498855, 0, 492, 16.8, 21.1, 27.4, 35.4, 44.8, 1713381763 +zero, 1599726, 1599726, 0, 579, 17.6, 22.5, 30.0, 39.1, 47.1, 1713381787 +zero, 1697617, 1697617, 0, 453, 18.0, 23.2, 31.7, 40.8, 47.5, 1713381812 +zero, 1796314, 1796314, 0, 634, 18.8, 24.8, 35.5, 44.4, 52.1, 1713381838 +zero, 1897517, 1897517, 0, 629, 19.5, 26.4, 37.9, 48.2, 61.6, 1713381865 +zero, 1999266, 1999266, 0, 606, 21.0, 29.7, 43.5, 56.2, 70.4, 1713381892 +zero, 2102472, 2102472, 0, 778, 22.3, 32.6, 48.5, 65.8, 87.7, 1713381921 +zero, 2200718, 2200718, 0, 891, 23.7, 38.0, 59.2, 81.1, 103.5, 1713381950 +zero, 2301468, 2301468, 0, 1184, 26.8, 47.3, 79.6, 112.0, 144.4, 1713381981 +zero, 2397395, 2397395, 0, 1447, 31.3, 73.9, 139.5, 201.9, 278.1, 1713382406 +zero, 2497858, 2497858, 0, 1316, 33.6, 101.9, 198.7, 296.9, 389.0, 1713382439 +zero, 2597631, 2597631, 0, 2973, 43.4, 245.1, 458.2, 658.1, 846.9, 1713382077 +zero, 2692099, 2692099, 0, 6758, 76.1, 394.0, 783.0, 1190.6, 2176.4, 1713382111 +zero, 2795863, 2795863, 0, 6926, 127.1, 557.3, 1159.0, 1946.1, 2755.8, 1713382542 +zero, 2890606, 2890606, 0, 8953, 169.8, 769.6, 1738.1, 2580.5, 3205.7, 1713382181 +zero, 2984029, 2984029, 0, 14500, 186.7, 805.4, 1785.3, 2737.7, 3930.0, 1713382615 diff --git a/paper_results/schbench/linux_cfs/112.txt b/paper_results/schbench/linux_cfs/112.txt new file mode 100644 index 0000000..232ed2c --- /dev/null +++ b/paper_results/schbench/linux_cfs/112.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101549 total samples) + 50.0th: 4648 (33462 samples) + 90.0th: 9296 (39276 samples) + * 99.0th: 9360 (7474 samples) + 99.9th: 13712 (534 samples) + min=1, max=25599 +Request Latencies percentiles (usec) runtime 10 (s) (101812 total samples) + 50.0th: 7000 (28245 samples) + 90.0th: 11664 (41597 samples) + * 99.0th: 12272 (6303 samples) + 99.9th: 18656 (909 samples) + min=2428, max=118316 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10160 (3 samples) + * 50.0th: 10224 (8 samples) + 90.0th: 10224 (0 samples) + min=9966, max=10217 +current rps: 10217 +Wakeup Latencies percentiles (usec) runtime 20 (s) (203285 total samples) + 50.0th: 4648 (65574 samples) + 90.0th: 9296 (79446 samples) + * 99.0th: 9360 (15216 samples) + 99.9th: 13200 (893 samples) + min=1, max=25599 +Request Latencies percentiles (usec) runtime 20 (s) (203908 total samples) + 50.0th: 7000 (56557 samples) + 90.0th: 11664 (83995 samples) + * 99.0th: 12112 (12559 samples) + 99.9th: 16544 (1775 samples) + min=2428, max=118316 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10192 (8 samples) + * 50.0th: 10224 (13 samples) + 90.0th: 10224 (0 samples) + min=9966, max=10223 +current rps: 10209 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305048 total samples) + 50.0th: 4648 (98025 samples) + 90.0th: 9296 (119351 samples) + * 99.0th: 9360 (22605 samples) + 99.9th: 11792 (1386 samples) + min=1, max=25599 +Request Latencies percentiles (usec) runtime 30 (s) (306026 total samples) + 50.0th: 7000 (84110 samples) + 90.0th: 11632 (118198 samples) + * 99.0th: 12048 (27497 samples) + 99.9th: 15984 (2729 samples) + min=2428, max=118316 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10192 (9 samples) + * 50.0th: 10224 (22 samples) + 90.0th: 10224 (0 samples) + min=9966, max=10223 +current rps: 10209 +Wakeup Latencies percentiles (usec) runtime 30 
(s) (305094 total samples) + 50.0th: 4648 (98036 samples) + 90.0th: 9296 (119381 samples) + * 99.0th: 9360 (22606 samples) + 99.9th: 11792 (1390 samples) + min=1, max=25599 +Request Latencies percentiles (usec) runtime 30 (s) (306139 total samples) + 50.0th: 7000 (84136 samples) + 90.0th: 11632 (118256 samples) + * 99.0th: 12048 (27504 samples) + 99.9th: 15984 (2733 samples) + min=2428, max=118316 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10192 (9 samples) + * 50.0th: 10224 (22 samples) + 90.0th: 10224 (0 samples) + min=9966, max=10223 +average rps: 10205 +message_threads 1 +worker_threads 112 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/128.txt b/paper_results/schbench/linux_cfs/128.txt new file mode 100644 index 0000000..2a786c5 --- /dev/null +++ b/paper_results/schbench/linux_cfs/128.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101551 total samples) + 50.0th: 4680 (39475 samples) + 90.0th: 9360 (36308 samples) + * 99.0th: 11696 (5010 samples) + 99.9th: 16304 (346 samples) + min=1, max=25776 +Request Latencies percentiles (usec) runtime 10 (s) (101839 total samples) + 50.0th: 7032 (28722 samples) + 90.0th: 11728 (39254 samples) + * 99.0th: 14448 (8994 samples) + 99.9th: 25760 (919 samples) + min=2421, max=64770 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10128 (3 samples) + * 50.0th: 10224 (8 samples) + 90.0th: 10224 (0 samples) + min=9958, max=10221 +current rps: 10210 +Wakeup Latencies percentiles (usec) runtime 20 (s) (203372 total samples) + 50.0th: 4680 (78510 samples) + 90.0th: 9360 (73377 samples) + * 99.0th: 11696 (10103 samples) + 99.9th: 14064 (508 samples) + min=1, max=25776 +Request Latencies percentiles (usec) runtime 20 (s) (204013 total samples) + 50.0th: 7032 (57881 samples) + 90.0th: 11728 (79467 samples) + * 99.0th: 14192 (17716 samples) + 99.9th: 21536 (1748 samples) + min=2421, max=64770 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10224 (21 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9958, max=10221 +current rps: 10217 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305142 total samples) + 50.0th: 4680 (117560 samples) + 90.0th: 9360 (110353 samples) + * 99.0th: 11696 (15144 samples) + 99.9th: 14032 (777 samples) + min=1, max=25776 +Request Latencies percentiles (usec) runtime 30 (s) (306150 total samples) + 50.0th: 7032 (87932 samples) + 90.0th: 11728 (119699 samples) + * 99.0th: 14160 (26307 samples) + 99.9th: 19040 (2655 samples) + min=2421, max=64770 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10224 (31 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9958, max=10221 +current rps: 10210 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305195 total samples) + 50.0th: 4680 (117565 samples) + 90.0th: 9360 (110390 samples) + * 99.0th: 11696 (15153 samples) + 99.9th: 14032 (779 samples) + min=1, max=25776 +Request Latencies percentiles (usec) runtime 30 (s) (306280 total samples) + 50.0th: 7032 (87974 samples) + 90.0th: 11728 (119757 samples) + * 99.0th: 14160 (26318 samples) + 99.9th: 19040 (2657 samples) + min=2421, max=64770 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10224 (31 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9958, max=10221 +average rps: 10209 +message_threads 1 +worker_threads 128 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/144.txt 
b/paper_results/schbench/linux_cfs/144.txt new file mode 100644 index 0000000..aed4c3a --- /dev/null +++ b/paper_results/schbench/linux_cfs/144.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101466 total samples) + 50.0th: 4696 (31810 samples) + 90.0th: 11664 (40797 samples) + * 99.0th: 11792 (7666 samples) + 99.9th: 18272 (792 samples) + min=1, max=72719 +Request Latencies percentiles (usec) runtime 10 (s) (101734 total samples) + 50.0th: 8056 (26859 samples) + 90.0th: 14032 (42520 samples) + * 99.0th: 16416 (7430 samples) + 99.9th: 32736 (812 samples) + min=2421, max=129471 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10128 (3 samples) + * 50.0th: 10192 (6 samples) + 90.0th: 10224 (2 samples) + min=9956, max=10209 +current rps: 10209 +Wakeup Latencies percentiles (usec) runtime 20 (s) (203049 total samples) + 50.0th: 4696 (59821 samples) + 90.0th: 11664 (80784 samples) + * 99.0th: 11760 (16654 samples) + 99.9th: 16416 (1593 samples) + min=1, max=72719 +Request Latencies percentiles (usec) runtime 20 (s) (203715 total samples) + 50.0th: 8432 (56298 samples) + 90.0th: 14032 (83884 samples) + * 99.0th: 16336 (15962 samples) + 99.9th: 24736 (1774 samples) + min=2421, max=129471 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10192 (19 samples) + * 50.0th: 10192 (0 samples) + 90.0th: 10192 (0 samples) + min=9956, max=10209 +current rps: 10200 +Wakeup Latencies percentiles (usec) runtime 30 (s) (304606 total samples) + 50.0th: 4696 (90370 samples) + 90.0th: 11664 (120786 samples) + * 99.0th: 11760 (25985 samples) + 99.9th: 16368 (2316 samples) + min=1, max=72719 +Request Latencies percentiles (usec) runtime 30 (s) (305662 total samples) + 50.0th: 8592 (85956 samples) + 90.0th: 14032 (124979 samples) + * 99.0th: 16272 (24803 samples) + 99.9th: 23008 (2723 samples) + min=2421, max=129471 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10192 (29 samples) + * 50.0th: 10192 (0 samples) + 90.0th: 10192 (0 samples) + min=9956, max=10209 +current rps: 10191 +Wakeup Latencies percentiles (usec) runtime 30 (s) (304659 total samples) + 50.0th: 4696 (90376 samples) + 90.0th: 11664 (120820 samples) + * 99.0th: 11760 (25994 samples) + 99.9th: 16368 (2319 samples) + min=1, max=72719 +Request Latencies percentiles (usec) runtime 30 (s) (305809 total samples) + 50.0th: 8592 (85995 samples) + 90.0th: 14032 (125034 samples) + * 99.0th: 16272 (24822 samples) + 99.9th: 23008 (2723 samples) + min=2421, max=129471 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10192 (29 samples) + * 50.0th: 10192 (0 samples) + 90.0th: 10192 (0 samples) + min=9956, max=10209 +average rps: 10194 +message_threads 1 +worker_threads 144 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/16.txt b/paper_results/schbench/linux_cfs/16.txt new file mode 100644 index 0000000..99c6671 --- /dev/null +++ b/paper_results/schbench/linux_cfs/16.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (63921 total samples) + 50.0th: 6 (37761 samples) + 90.0th: 7 (10076 samples) + * 99.0th: 8 (1866 samples) + 99.9th: 11 (558 samples) + min=1, max=38 +Request Latencies percentiles (usec) runtime 10 (s) (64124 total samples) + 50.0th: 2484 (27995 samples) + 90.0th: 2492 (14647 samples) + * 99.0th: 2516 (1649 samples) + 99.9th: 2556 (377 samples) + min=2419, max=3915 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 6408 (8 samples) + * 50.0th: 6408 (0 samples) + 
90.0th: 6424 (3 samples) + min=6404, max=6418 +current rps: 6410 +Wakeup Latencies percentiles (usec) runtime 20 (s) (127847 total samples) + 50.0th: 6 (77272 samples) + 90.0th: 7 (19989 samples) + * 99.0th: 8 (3562 samples) + 99.9th: 10 (889 samples) + min=1, max=38 +Request Latencies percentiles (usec) runtime 20 (s) (128220 total samples) + 50.0th: 2484 (59104 samples) + 90.0th: 2492 (34713 samples) + * 99.0th: 2508 (2322 samples) + 99.9th: 2540 (1079 samples) + min=2419, max=3915 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 6408 (18 samples) + * 50.0th: 6408 (0 samples) + 90.0th: 6424 (3 samples) + min=6404, max=6418 +current rps: 6409 +Wakeup Latencies percentiles (usec) runtime 30 (s) (191791 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (30815 samples) + * 99.0th: 8 (5535 samples) + 99.9th: 10 (1293 samples) + min=1, max=38 +Request Latencies percentiles (usec) runtime 30 (s) (192344 total samples) + 50.0th: 2484 (87556 samples) + 90.0th: 2492 (51198 samples) + * 99.0th: 2508 (3240 samples) + 99.9th: 2540 (1411 samples) + min=2419, max=3915 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 6408 (26 samples) + * 50.0th: 6408 (0 samples) + 90.0th: 6424 (5 samples) + min=6404, max=6420 +current rps: 6411 +Wakeup Latencies percentiles (usec) runtime 30 (s) (191791 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (30815 samples) + * 99.0th: 8 (5535 samples) + 99.9th: 10 (1293 samples) + min=1, max=38 +Request Latencies percentiles (usec) runtime 30 (s) (192360 total samples) + 50.0th: 2484 (87562 samples) + 90.0th: 2492 (51204 samples) + * 99.0th: 2508 (3241 samples) + 99.9th: 2540 (1412 samples) + min=2419, max=3915 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 6408 (26 samples) + * 50.0th: 6408 (0 samples) + 90.0th: 6424 (5 samples) + min=6404, max=6420 +average rps: 6412 +message_threads 1 +worker_threads 16 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/160.txt b/paper_results/schbench/linux_cfs/160.txt new file mode 100644 index 0000000..f7a8b26 --- /dev/null +++ b/paper_results/schbench/linux_cfs/160.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101704 total samples) + 50.0th: 6968 (29948 samples) + 90.0th: 11696 (40011 samples) + * 99.0th: 14032 (8856 samples) + 99.9th: 21664 (833 samples) + min=1, max=68863 +Request Latencies percentiles (usec) runtime 10 (s) (101928 total samples) + 50.0th: 9328 (27520 samples) + 90.0th: 15312 (39950 samples) + * 99.0th: 17952 (9175 samples) + 99.9th: 39744 (910 samples) + min=2423, max=130917 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10160 (3 samples) + * 50.0th: 10224 (8 samples) + 90.0th: 10224 (0 samples) + min=9963, max=10229 +current rps: 10218 +Wakeup Latencies percentiles (usec) runtime 20 (s) (203626 total samples) + 50.0th: 6968 (60283 samples) + 90.0th: 11696 (81155 samples) + * 99.0th: 14032 (17817 samples) + 99.9th: 19744 (1296 samples) + min=1, max=68863 +Request Latencies percentiles (usec) runtime 20 (s) (204181 total samples) + 50.0th: 9328 (53929 samples) + 90.0th: 15088 (81552 samples) + * 99.0th: 17056 (18371 samples) + 99.9th: 28640 (1833 samples) + min=2423, max=130917 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10224 (21 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9963, max=10232 +current rps: 10220 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305560 total samples) + 50.0th: 6968 (90598 
samples) + 90.0th: 11696 (122677 samples) + * 99.0th: 14032 (26700 samples) + 99.9th: 18720 (1840 samples) + min=1, max=68863 +Request Latencies percentiles (usec) runtime 30 (s) (306434 total samples) + 50.0th: 9360 (100425 samples) + 90.0th: 14992 (103040 samples) + * 99.0th: 16864 (27546 samples) + 99.9th: 26016 (2760 samples) + min=2423, max=130917 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10224 (31 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9963, max=10233 +current rps: 10228 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305642 total samples) + 50.0th: 6968 (90614 samples) + 90.0th: 11696 (122716 samples) + * 99.0th: 14032 (26718 samples) + 99.9th: 18720 (1848 samples) + min=1, max=68863 +Request Latencies percentiles (usec) runtime 30 (s) (306598 total samples) + 50.0th: 9360 (100470 samples) + 90.0th: 14992 (103101 samples) + * 99.0th: 16928 (27670 samples) + 99.9th: 26016 (2656 samples) + min=2423, max=130917 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10224 (31 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9963, max=10233 +average rps: 10220 +message_threads 1 +worker_threads 160 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/176.txt b/paper_results/schbench/linux_cfs/176.txt new file mode 100644 index 0000000..df9e107 --- /dev/null +++ b/paper_results/schbench/linux_cfs/176.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101677 total samples) + 50.0th: 7000 (28150 samples) + 90.0th: 14000 (41362 samples) + * 99.0th: 16368 (8454 samples) + 99.9th: 25696 (595 samples) + min=1, max=94063 +Request Latencies percentiles (usec) runtime 10 (s) (101940 total samples) + 50.0th: 9360 (31175 samples) + 90.0th: 16368 (39971 samples) + * 99.0th: 19296 (7984 samples) + 99.9th: 53312 (906 samples) + min=2420, max=140793 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10160 (3 samples) + * 50.0th: 10224 (8 samples) + 90.0th: 10224 (0 samples) + min=9972, max=10227 +current rps: 10221 +Wakeup Latencies percentiles (usec) runtime 20 (s) (203537 total samples) + 50.0th: 7016 (68654 samples) + 90.0th: 14000 (71659 samples) + * 99.0th: 16368 (18507 samples) + 99.9th: 23392 (953 samples) + min=1, max=94063 +Request Latencies percentiles (usec) runtime 20 (s) (204183 total samples) + 50.0th: 9360 (62210 samples) + 90.0th: 16368 (81587 samples) + * 99.0th: 18848 (15672 samples) + 99.9th: 38464 (1781 samples) + min=2420, max=140793 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10224 (21 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9972, max=10230 +current rps: 10220 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305407 total samples) + 50.0th: 7016 (103991 samples) + 90.0th: 14000 (106970 samples) + * 99.0th: 16368 (28911 samples) + 99.9th: 23328 (1280 samples) + min=1, max=94063 +Request Latencies percentiles (usec) runtime 30 (s) (306432 total samples) + 50.0th: 9360 (93230 samples) + 90.0th: 16368 (123264 samples) + * 99.0th: 18848 (23673 samples) + 99.9th: 31520 (2327 samples) + min=2420, max=140793 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10224 (31 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9972, max=10234 +current rps: 10218 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305488 total samples) + 50.0th: 7016 (104003 samples) + 90.0th: 14000 (107017 samples) + * 99.0th: 16368 
(28930 samples) + 99.9th: 23328 (1283 samples) + min=1, max=94063 +Request Latencies percentiles (usec) runtime 30 (s) (306614 total samples) + 50.0th: 9360 (93280 samples) + 90.0th: 16368 (123333 samples) + * 99.0th: 18848 (23701 samples) + 99.9th: 31520 (2329 samples) + min=2420, max=140793 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10224 (31 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9972, max=10234 +average rps: 10220 +message_threads 1 +worker_threads 176 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/192.txt b/paper_results/schbench/linux_cfs/192.txt new file mode 100644 index 0000000..d278ade --- /dev/null +++ b/paper_results/schbench/linux_cfs/192.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101797 total samples) + 50.0th: 7016 (30770 samples) + 90.0th: 16272 (40279 samples) + * 99.0th: 16480 (8610 samples) + 99.9th: 28000 (891 samples) + min=1, max=81305 +Request Latencies percentiles (usec) runtime 10 (s) (102020 total samples) + 50.0th: 9424 (30125 samples) + 90.0th: 18656 (43270 samples) + * 99.0th: 21024 (6753 samples) + 99.9th: 53696 (869 samples) + min=2422, max=143723 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10160 (3 samples) + * 50.0th: 10224 (8 samples) + 90.0th: 10224 (0 samples) + min=9990, max=10235 +current rps: 10230 +Wakeup Latencies percentiles (usec) runtime 20 (s) (203776 total samples) + 50.0th: 7016 (51254 samples) + 90.0th: 16272 (81584 samples) + * 99.0th: 16416 (18069 samples) + 99.9th: 25824 (1761 samples) + min=1, max=81305 +Request Latencies percentiles (usec) runtime 20 (s) (204323 total samples) + 50.0th: 9552 (60299 samples) + 90.0th: 18656 (85666 samples) + * 99.0th: 20448 (14380 samples) + 99.9th: 40768 (1836 samples) + min=2422, max=143723 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10224 (21 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9990, max=10235 +current rps: 10225 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305661 total samples) + 50.0th: 7032 (79878 samples) + 90.0th: 16304 (125504 samples) + * 99.0th: 16416 (21814 samples) + 99.9th: 25696 (2583 samples) + min=1, max=81305 +Request Latencies percentiles (usec) runtime 30 (s) (306608 total samples) + 50.0th: 9712 (91298 samples) + 90.0th: 18656 (127998 samples) + * 99.0th: 19872 (22276 samples) + 99.9th: 34496 (2719 samples) + min=2422, max=143723 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10224 (31 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9990, max=10235 +current rps: 10226 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305751 total samples) + 50.0th: 7032 (79891 samples) + 90.0th: 16304 (125562 samples) + * 99.0th: 16416 (21829 samples) + 99.9th: 25696 (2587 samples) + min=1, max=81305 +Request Latencies percentiles (usec) runtime 30 (s) (306805 total samples) + 50.0th: 9712 (91362 samples) + 90.0th: 18656 (128077 samples) + * 99.0th: 19872 (22297 samples) + 99.9th: 34496 (2720 samples) + min=2422, max=143723 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10224 (31 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9990, max=10235 +average rps: 10227 +message_threads 1 +worker_threads 192 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/208.txt b/paper_results/schbench/linux_cfs/208.txt new file mode 100644 index 0000000..d6b32ad --- 
/dev/null +++ b/paper_results/schbench/linux_cfs/208.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101523 total samples) + 50.0th: 9296 (29426 samples) + 90.0th: 16416 (42842 samples) + * 99.0th: 18784 (6678 samples) + 99.9th: 30496 (915 samples) + min=1, max=85833 +Request Latencies percentiles (usec) runtime 10 (s) (101765 total samples) + 50.0th: 11664 (31018 samples) + 90.0th: 18848 (40483 samples) + * 99.0th: 23392 (8677 samples) + 99.9th: 58304 (874 samples) + min=2419, max=187580 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10128 (3 samples) + * 50.0th: 10192 (5 samples) + 90.0th: 10224 (3 samples) + min=9954, max=10210 +current rps: 10210 +Wakeup Latencies percentiles (usec) runtime 20 (s) (203200 total samples) + 50.0th: 9328 (61183 samples) + 90.0th: 16416 (85079 samples) + * 99.0th: 18784 (13875 samples) + 99.9th: 29856 (1520 samples) + min=1, max=85833 +Request Latencies percentiles (usec) runtime 20 (s) (203818 total samples) + 50.0th: 11696 (64070 samples) + 90.0th: 18848 (78846 samples) + * 99.0th: 21536 (17366 samples) + 99.9th: 45888 (1787 samples) + min=2419, max=187580 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10192 (18 samples) + * 50.0th: 10192 (0 samples) + 90.0th: 10224 (3 samples) + min=9954, max=10210 +current rps: 10204 +Wakeup Latencies percentiles (usec) runtime 30 (s) (304901 total samples) + 50.0th: 9360 (107851 samples) + 90.0th: 16416 (112530 samples) + * 99.0th: 18784 (21087 samples) + 99.9th: 28896 (2148 samples) + min=1, max=85833 +Request Latencies percentiles (usec) runtime 30 (s) (305875 total samples) + 50.0th: 11696 (94991 samples) + 90.0th: 18848 (120068 samples) + * 99.0th: 21344 (26027 samples) + 99.9th: 38720 (2739 samples) + min=2419, max=187580 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10192 (27 samples) + * 50.0th: 10192 (0 samples) + 90.0th: 10224 (4 samples) + min=9954, max=10210 +current rps: 10200 +Wakeup Latencies percentiles (usec) runtime 30 (s) (304987 total samples) + 50.0th: 9360 (96598 samples) + 90.0th: 16416 (112580 samples) + * 99.0th: 18784 (21102 samples) + 99.9th: 28896 (2150 samples) + min=1, max=85833 +Request Latencies percentiles (usec) runtime 30 (s) (306088 total samples) + 50.0th: 11696 (95054 samples) + 90.0th: 18848 (120165 samples) + * 99.0th: 21344 (26039 samples) + 99.9th: 38592 (2741 samples) + min=2419, max=187580 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10192 (27 samples) + * 50.0th: 10192 (0 samples) + 90.0th: 10224 (4 samples) + min=9954, max=10210 +average rps: 10203 +message_threads 1 +worker_threads 208 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/224.txt b/paper_results/schbench/linux_cfs/224.txt new file mode 100644 index 0000000..687b0bd --- /dev/null +++ b/paper_results/schbench/linux_cfs/224.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101881 total samples) + 50.0th: 9328 (30068 samples) + 90.0th: 18656 (42689 samples) + * 99.0th: 21088 (6707 samples) + 99.9th: 35264 (831 samples) + min=1, max=76681 +Request Latencies percentiles (usec) runtime 10 (s) (102076 total samples) + 50.0th: 11664 (30848 samples) + 90.0th: 21024 (43115 samples) + * 99.0th: 25696 (6283 samples) + 99.9th: 66176 (912 samples) + min=2421, max=194020 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10160 (3 samples) + * 50.0th: 10224 (5 samples) + 90.0th: 10256 (3 samples) + min=9979, max=10248 +current 
rps: 10238 +Wakeup Latencies percentiles (usec) runtime 20 (s) (203914 total samples) + 50.0th: 9360 (68392 samples) + 90.0th: 18656 (77558 samples) + * 99.0th: 21024 (14632 samples) + 99.9th: 32832 (1726 samples) + min=1, max=76681 +Request Latencies percentiles (usec) runtime 20 (s) (204398 total samples) + 50.0th: 11696 (66297 samples) + 90.0th: 21024 (82614 samples) + * 99.0th: 23520 (12611 samples) + 99.9th: 48960 (1753 samples) + min=2421, max=194020 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10224 (18 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10256 (3 samples) + min=9979, max=10248 +current rps: 10208 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305880 total samples) + 50.0th: 9360 (91754 samples) + 90.0th: 18656 (116112 samples) + * 99.0th: 21024 (23371 samples) + 99.9th: 32736 (2513 samples) + min=1, max=76681 +Request Latencies percentiles (usec) runtime 30 (s) (306718 total samples) + 50.0th: 11696 (97801 samples) + 90.0th: 21024 (125569 samples) + * 99.0th: 23456 (19069 samples) + 99.9th: 42432 (2591 samples) + min=2421, max=194020 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10224 (28 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9979, max=10248 +current rps: 10227 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305978 total samples) + 50.0th: 9360 (91775 samples) + 90.0th: 18656 (116166 samples) + * 99.0th: 21024 (23390 samples) + 99.9th: 32736 (2514 samples) + min=1, max=76681 +Request Latencies percentiles (usec) runtime 30 (s) (306945 total samples) + 50.0th: 11696 (97862 samples) + 90.0th: 21024 (125681 samples) + * 99.0th: 23456 (19088 samples) + 99.9th: 42432 (2593 samples) + min=2421, max=194020 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10224 (28 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10224 (0 samples) + min=9979, max=10248 +average rps: 10232 +message_threads 1 +worker_threads 224 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/24.txt b/paper_results/schbench/linux_cfs/24.txt new file mode 100644 index 0000000..70476c1 --- /dev/null +++ b/paper_results/schbench/linux_cfs/24.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (95067 total samples) + 50.0th: 6 (59604 samples) + 90.0th: 7 (7524 samples) + * 99.0th: 17 (4246 samples) + 99.9th: 1766 (839 samples) + min=1, max=3602 +Request Latencies percentiles (usec) runtime 10 (s) (95588 total samples) + 50.0th: 2484 (32125 samples) + 90.0th: 2492 (29872 samples) + * 99.0th: 2532 (5351 samples) + 99.9th: 4728 (903 samples) + min=2420, max=4898 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9520 (3 samples) + * 50.0th: 9584 (8 samples) + 90.0th: 9584 (0 samples) + min=9352, max=9593 +current rps: 9582 +Wakeup Latencies percentiles (usec) runtime 20 (s) (190446 total samples) + 50.0th: 6 (120089 samples) + 90.0th: 7 (15834 samples) + * 99.0th: 17 (8460 samples) + 99.9th: 903 (1688 samples) + min=1, max=3602 +Request Latencies percentiles (usec) runtime 20 (s) (191498 total samples) + 50.0th: 2484 (61047 samples) + 90.0th: 2492 (59839 samples) + * 99.0th: 2532 (10892 samples) + 99.9th: 4712 (1343 samples) + min=2419, max=4898 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9584 (21 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9584 (0 samples) + min=9352, max=9595 +current rps: 9587 +Wakeup Latencies percentiles (usec) runtime 30 (s) (285779 total samples) + 50.0th: 6 (182014 samples) 
+ 90.0th: 7 (23354 samples) + * 99.0th: 17 (12847 samples) + 99.9th: 875 (2551 samples) + min=1, max=3602 +Request Latencies percentiles (usec) runtime 30 (s) (287355 total samples) + 50.0th: 2484 (95726 samples) + 90.0th: 2492 (89638 samples) + * 99.0th: 2532 (16202 samples) + 99.9th: 4696 (1736 samples) + min=2419, max=4898 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9584 (31 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9584 (0 samples) + min=9352, max=9597 +current rps: 9559 +Wakeup Latencies percentiles (usec) runtime 30 (s) (285779 total samples) + 50.0th: 6 (182014 samples) + 90.0th: 7 (23354 samples) + * 99.0th: 17 (12847 samples) + 99.9th: 875 (2551 samples) + min=1, max=3602 +Request Latencies percentiles (usec) runtime 30 (s) (287379 total samples) + 50.0th: 2484 (95735 samples) + 90.0th: 2492 (89640 samples) + * 99.0th: 2532 (16210 samples) + 99.9th: 4696 (1738 samples) + min=2419, max=4898 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9584 (31 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9584 (0 samples) + min=9352, max=9597 +average rps: 9579 +message_threads 1 +worker_threads 24 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/240.txt b/paper_results/schbench/linux_cfs/240.txt new file mode 100644 index 0000000..eebb8d6 --- /dev/null +++ b/paper_results/schbench/linux_cfs/240.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101894 total samples) + 50.0th: 9360 (28730 samples) + 90.0th: 18784 (40740 samples) + * 99.0th: 23392 (8867 samples) + 99.9th: 43712 (888 samples) + min=1, max=165803 +Request Latencies percentiles (usec) runtime 10 (s) (102045 total samples) + 50.0th: 11696 (32048 samples) + 90.0th: 21856 (39409 samples) + * 99.0th: 27488 (9159 samples) + 99.9th: 74624 (918 samples) + min=2418, max=196242 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10160 (3 samples) + * 50.0th: 10224 (5 samples) + 90.0th: 10256 (3 samples) + min=9951, max=10243 +current rps: 10238 +Wakeup Latencies percentiles (usec) runtime 20 (s) (203987 total samples) + 50.0th: 9456 (57023 samples) + 90.0th: 18784 (81534 samples) + * 99.0th: 23328 (18427 samples) + 99.9th: 39616 (1724 samples) + min=1, max=165803 +Request Latencies percentiles (usec) runtime 20 (s) (204477 total samples) + 50.0th: 11728 (63229 samples) + 90.0th: 21920 (79956 samples) + * 99.0th: 25632 (18348 samples) + 99.9th: 56128 (1827 samples) + min=2418, max=196242 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10224 (12 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10256 (9 samples) + min=9951, max=10244 +current rps: 10237 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305958 total samples) + 50.0th: 9648 (85401 samples) + 90.0th: 18848 (122579 samples) + * 99.0th: 23328 (27519 samples) + 99.9th: 37568 (2577 samples) + min=1, max=165803 +Request Latencies percentiles (usec) runtime 30 (s) (306768 total samples) + 50.0th: 11728 (93201 samples) + 90.0th: 21984 (121562 samples) + * 99.0th: 25568 (27657 samples) + 99.9th: 49216 (2693 samples) + min=2418, max=196242 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10224 (20 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10256 (11 samples) + min=9951, max=10245 +current rps: 10231 +Wakeup Latencies percentiles (usec) runtime 30 (s) (306063 total samples) + 50.0th: 9680 (85498 samples) + 90.0th: 18848 (122568 samples) + * 99.0th: 23328 (27536 samples) + 99.9th: 37568 (2577 samples) + 
min=1, max=165803 +Request Latencies percentiles (usec) runtime 30 (s) (307016 total samples) + 50.0th: 11728 (93246 samples) + 90.0th: 21984 (121683 samples) + * 99.0th: 25568 (27675 samples) + 99.9th: 49216 (2693 samples) + min=2418, max=196242 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10224 (20 samples) + * 50.0th: 10224 (0 samples) + 90.0th: 10256 (11 samples) + min=9951, max=10245 +average rps: 10234 +message_threads 1 +worker_threads 240 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/256.txt b/paper_results/schbench/linux_cfs/256.txt new file mode 100644 index 0000000..d7faeb3 --- /dev/null +++ b/paper_results/schbench/linux_cfs/256.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (102008 total samples) + 50.0th: 10928 (29477 samples) + 90.0th: 20960 (40979 samples) + * 99.0th: 25760 (9014 samples) + 99.9th: 42560 (901 samples) + min=1, max=110082 +Request Latencies percentiles (usec) runtime 10 (s) (102120 total samples) + 50.0th: 11696 (32278 samples) + 90.0th: 23392 (41036 samples) + * 99.0th: 32544 (7331 samples) + 99.9th: 79488 (918 samples) + min=2420, max=337847 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10160 (3 samples) + * 50.0th: 10256 (8 samples) + 90.0th: 10256 (0 samples) + min=9953, max=10252 +current rps: 10250 +Wakeup Latencies percentiles (usec) runtime 20 (s) (204126 total samples) + 50.0th: 11632 (60871 samples) + 90.0th: 21024 (85822 samples) + * 99.0th: 25632 (12769 samples) + 99.9th: 40896 (1781 samples) + min=1, max=110082 +Request Latencies percentiles (usec) runtime 20 (s) (204584 total samples) + 50.0th: 11760 (61705 samples) + 90.0th: 23392 (85052 samples) + * 99.0th: 28000 (14893 samples) + 99.9th: 60992 (1808 samples) + min=2420, max=337847 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10224 (5 samples) + * 50.0th: 10256 (16 samples) + 90.0th: 10256 (0 samples) + min=9953, max=10252 +current rps: 10250 +Wakeup Latencies percentiles (usec) runtime 30 (s) (306220 total samples) + 50.0th: 11632 (89613 samples) + 90.0th: 21024 (130000 samples) + * 99.0th: 25504 (19614 samples) + 99.9th: 39872 (2756 samples) + min=1, max=110082 +Request Latencies percentiles (usec) runtime 30 (s) (307064 total samples) + 50.0th: 11920 (92259 samples) + 90.0th: 23392 (127854 samples) + * 99.0th: 26912 (22478 samples) + 99.9th: 51392 (2755 samples) + min=2420, max=337847 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10256 (31 samples) + * 50.0th: 10256 (0 samples) + 90.0th: 10256 (0 samples) + min=9953, max=10252 +current rps: 10248 +Wakeup Latencies percentiles (usec) runtime 30 (s) (306357 total samples) + 50.0th: 11632 (89643 samples) + 90.0th: 21024 (130084 samples) + * 99.0th: 25568 (19775 samples) + 99.9th: 39872 (2618 samples) + min=1, max=110082 +Request Latencies percentiles (usec) runtime 30 (s) (307327 total samples) + 50.0th: 11920 (92328 samples) + 90.0th: 23392 (127967 samples) + * 99.0th: 26912 (22500 samples) + 99.9th: 51392 (2756 samples) + min=2420, max=337847 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10256 (31 samples) + * 50.0th: 10256 (0 samples) + 90.0th: 10256 (0 samples) + min=9953, max=10252 +average rps: 10244 +message_threads 1 +worker_threads 256 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/32.txt b/paper_results/schbench/linux_cfs/32.txt new file mode 100644 index 0000000..3858366 --- /dev/null +++ 
b/paper_results/schbench/linux_cfs/32.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (98354 total samples) + 50.0th: 5 (31023 samples) + 90.0th: 1126 (37337 samples) + * 99.0th: 2340 (9152 samples) + 99.9th: 2348 (555 samples) + min=1, max=3323 +Request Latencies percentiles (usec) runtime 10 (s) (99025 total samples) + 50.0th: 2500 (19240 samples) + 90.0th: 4168 (39332 samples) + * 99.0th: 4808 (9171 samples) + 99.9th: 5224 (617 samples) + min=2419, max=7139 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9872 (3 samples) + * 50.0th: 9936 (6 samples) + 90.0th: 9968 (2 samples) + min=9600, max=9953 +current rps: 9936 +Wakeup Latencies percentiles (usec) runtime 20 (s) (197205 total samples) + 50.0th: 5 (60539 samples) + 90.0th: 1122 (74169 samples) + * 99.0th: 2340 (18422 samples) + 99.9th: 2348 (1039 samples) + min=1, max=3570 +Request Latencies percentiles (usec) runtime 20 (s) (198515 total samples) + 50.0th: 2508 (40661 samples) + 90.0th: 4068 (78681 samples) + * 99.0th: 4792 (17830 samples) + 99.9th: 5080 (1724 samples) + min=2419, max=7139 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9936 (15 samples) + * 50.0th: 9936 (0 samples) + 90.0th: 9968 (6 samples) + min=9600, max=9955 +current rps: 9946 +Wakeup Latencies percentiles (usec) runtime 30 (s) (296011 total samples) + 50.0th: 5 (90320 samples) + 90.0th: 1118 (110659 samples) + * 99.0th: 2340 (27710 samples) + 99.9th: 2348 (1587 samples) + min=1, max=3570 +Request Latencies percentiles (usec) runtime 30 (s) (297959 total samples) + 50.0th: 2508 (61607 samples) + 90.0th: 4044 (118748 samples) + * 99.0th: 4792 (27025 samples) + 99.9th: 5032 (2346 samples) + min=2419, max=7139 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9936 (25 samples) + * 50.0th: 9936 (0 samples) + 90.0th: 9968 (6 samples) + min=9600, max=9955 +current rps: 9942 +Wakeup Latencies percentiles (usec) runtime 30 (s) (296015 total samples) + 50.0th: 5 (90321 samples) + 90.0th: 1118 (110660 samples) + * 99.0th: 2340 (27711 samples) + 99.9th: 2348 (1587 samples) + min=1, max=3570 +Request Latencies percentiles (usec) runtime 30 (s) (297993 total samples) + 50.0th: 2508 (61614 samples) + 90.0th: 4044 (118767 samples) + * 99.0th: 4792 (27028 samples) + 99.9th: 5032 (2346 samples) + min=2419, max=7139 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9936 (25 samples) + * 50.0th: 9936 (0 samples) + 90.0th: 9968 (6 samples) + min=9600, max=9955 +average rps: 9933 +message_threads 1 +worker_threads 32 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/4.txt b/paper_results/schbench/linux_cfs/4.txt new file mode 100644 index 0000000..22fe4d2 --- /dev/null +++ b/paper_results/schbench/linux_cfs/4.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (16008 total samples) + 50.0th: 6 (11059 samples) + 90.0th: 6 (0 samples) + * 99.0th: 7 (1330 samples) + 99.9th: 18 (124 samples) + min=1, max=72 +Request Latencies percentiles (usec) runtime 10 (s) (16018 total samples) + 50.0th: 2484 (7622 samples) + 90.0th: 2492 (1238 samples) + * 99.0th: 2524 (1088 samples) + 99.9th: 3748 (61 samples) + min=2420, max=4867 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 1602 (8 samples) + * 50.0th: 1602 (0 samples) + 90.0th: 1606 (3 samples) + min=1591, max=1605 +current rps: 1591 +Wakeup Latencies percentiles (usec) runtime 20 (s) (31963 total samples) + 50.0th: 6 (21244 samples) + 90.0th: 6 (0 samples) 
+ * 99.0th: 19 (2374 samples) + 99.9th: 30 (288 samples) + min=1, max=74 +Request Latencies percentiles (usec) runtime 20 (s) (31987 total samples) + 50.0th: 2484 (14970 samples) + 90.0th: 2492 (2463 samples) + * 99.0th: 2524 (2221 samples) + 99.9th: 4712 (240 samples) + min=2420, max=5269 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 1598 (5 samples) + * 50.0th: 1602 (11 samples) + 90.0th: 1606 (5 samples) + min=1567, max=1606 +current rps: 1603 +Wakeup Latencies percentiles (usec) runtime 30 (s) (47951 total samples) + 50.0th: 6 (31469 samples) + 90.0th: 6 (0 samples) + * 99.0th: 20 (3466 samples) + 99.9th: 31 (327 samples) + min=1, max=74 +Request Latencies percentiles (usec) runtime 30 (s) (47993 total samples) + 50.0th: 2484 (23090 samples) + 90.0th: 2492 (4028 samples) + * 99.0th: 2524 (3591 samples) + 99.9th: 4664 (335 samples) + min=2420, max=5269 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 1598 (8 samples) + * 50.0th: 1602 (15 samples) + 90.0th: 1606 (8 samples) + min=1567, max=1606 +current rps: 1605 +Wakeup Latencies percentiles (usec) runtime 30 (s) (47952 total samples) + 50.0th: 6 (31470 samples) + 90.0th: 6 (0 samples) + * 99.0th: 20 (3466 samples) + 99.9th: 31 (327 samples) + min=1, max=74 +Request Latencies percentiles (usec) runtime 30 (s) (47998 total samples) + 50.0th: 2484 (23091 samples) + 90.0th: 2492 (4028 samples) + * 99.0th: 2524 (3592 samples) + 99.9th: 4664 (335 samples) + min=2420, max=5269 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 1598 (8 samples) + * 50.0th: 1602 (15 samples) + 90.0th: 1606 (8 samples) + min=1567, max=1606 +average rps: 1600 +message_threads 1 +worker_threads 4 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/40.txt b/paper_results/schbench/linux_cfs/40.txt new file mode 100644 index 0000000..aedd5c7 --- /dev/null +++ b/paper_results/schbench/linux_cfs/40.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (99267 total samples) + 50.0th: 7 (26158 samples) + 90.0th: 2332 (39356 samples) + * 99.0th: 2348 (7497 samples) + 99.9th: 2396 (414 samples) + min=1, max=4279 +Request Latencies percentiles (usec) runtime 10 (s) (99973 total samples) + 50.0th: 2932 (20368 samples) + 90.0th: 4744 (40375 samples) + * 99.0th: 4856 (8234 samples) + 99.9th: 6616 (821 samples) + min=2424, max=11691 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9936 (3 samples) + * 50.0th: 10032 (8 samples) + 90.0th: 10032 (0 samples) + min=9749, max=10038 +current rps: 10038 +Wakeup Latencies percentiles (usec) runtime 20 (s) (198926 total samples) + 50.0th: 7 (50929 samples) + 90.0th: 2332 (79639 samples) + * 99.0th: 2348 (14291 samples) + 99.9th: 2380 (748 samples) + min=1, max=4279 +Request Latencies percentiles (usec) runtime 20 (s) (200371 total samples) + 50.0th: 2932 (40735 samples) + 90.0th: 4728 (80742 samples) + * 99.0th: 4840 (17256 samples) + 99.9th: 6632 (1757 samples) + min=2420, max=11691 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10032 (20 samples) + * 50.0th: 10032 (0 samples) + 90.0th: 10032 (0 samples) + min=9749, max=10048 +current rps: 10030 +Wakeup Latencies percentiles (usec) runtime 30 (s) (298577 total samples) + 50.0th: 7 (75695 samples) + 90.0th: 2332 (119341 samples) + * 99.0th: 2348 (21679 samples) + 99.9th: 2380 (1059 samples) + min=1, max=4331 +Request Latencies percentiles (usec) runtime 30 (s) (300742 total samples) + 50.0th: 2940 (62815 samples) + 90.0th: 4728 
(120742 samples) + * 99.0th: 4840 (25273 samples) + 99.9th: 6616 (2545 samples) + min=2417, max=11691 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10032 (30 samples) + * 50.0th: 10032 (0 samples) + 90.0th: 10032 (0 samples) + min=9749, max=10048 +current rps: 10028 +Wakeup Latencies percentiles (usec) runtime 30 (s) (298586 total samples) + 50.0th: 7 (75696 samples) + 90.0th: 2332 (119343 samples) + * 99.0th: 2348 (21681 samples) + 99.9th: 2380 (1062 samples) + min=1, max=4331 +Request Latencies percentiles (usec) runtime 30 (s) (300783 total samples) + 50.0th: 2940 (62824 samples) + 90.0th: 4728 (120758 samples) + * 99.0th: 4840 (25281 samples) + 99.9th: 6616 (2545 samples) + min=2417, max=11691 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10032 (30 samples) + * 50.0th: 10032 (0 samples) + 90.0th: 10032 (0 samples) + min=9749, max=10048 +average rps: 10026 +message_threads 1 +worker_threads 40 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/48.txt b/paper_results/schbench/linux_cfs/48.txt new file mode 100644 index 0000000..d5b8aef --- /dev/null +++ b/paper_results/schbench/linux_cfs/48.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (100058 total samples) + 50.0th: 10 (19154 samples) + 90.0th: 2340 (44844 samples) + * 99.0th: 2372 (3936 samples) + 99.9th: 4044 (658 samples) + min=1, max=5608 +Request Latencies percentiles (usec) runtime 10 (s) (100722 total samples) + 50.0th: 3540 (30217 samples) + 90.0th: 4792 (40934 samples) + * 99.0th: 6680 (8309 samples) + 99.9th: 8136 (907 samples) + min=2427, max=13647 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10032 (3 samples) + * 50.0th: 10096 (8 samples) + 90.0th: 10096 (0 samples) + min=9895, max=10109 +current rps: 10104 +Wakeup Latencies percentiles (usec) runtime 20 (s) (200472 total samples) + 50.0th: 10 (38265 samples) + 90.0th: 2340 (89352 samples) + * 99.0th: 2372 (7948 samples) + 99.9th: 3868 (1256 samples) + min=1, max=5608 +Request Latencies percentiles (usec) runtime 20 (s) (201770 total samples) + 50.0th: 3532 (60272 samples) + 90.0th: 4792 (82461 samples) + * 99.0th: 6632 (16323 samples) + 99.9th: 7880 (1812 samples) + min=2418, max=13647 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10096 (19 samples) + * 50.0th: 10096 (0 samples) + 90.0th: 10096 (0 samples) + min=9895, max=10116 +current rps: 10083 +Wakeup Latencies percentiles (usec) runtime 30 (s) (300833 total samples) + 50.0th: 10 (57022 samples) + 90.0th: 2340 (134374 samples) + * 99.0th: 2372 (12031 samples) + 99.9th: 3756 (1846 samples) + min=1, max=7019 +Request Latencies percentiles (usec) runtime 30 (s) (302830 total samples) + 50.0th: 3516 (90394 samples) + 90.0th: 4792 (124023 samples) + * 99.0th: 6520 (24337 samples) + 99.9th: 7736 (2726 samples) + min=2418, max=13723 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10096 (28 samples) + * 50.0th: 10096 (0 samples) + 90.0th: 10096 (0 samples) + min=9895, max=10116 +current rps: 10104 +Wakeup Latencies percentiles (usec) runtime 30 (s) (300847 total samples) + 50.0th: 10 (57024 samples) + 90.0th: 2340 (134377 samples) + * 99.0th: 2372 (12036 samples) + 99.9th: 3756 (1849 samples) + min=1, max=7019 +Request Latencies percentiles (usec) runtime 30 (s) (302881 total samples) + 50.0th: 3516 (90408 samples) + 90.0th: 4792 (124044 samples) + * 99.0th: 6520 (24342 samples) + 99.9th: 7736 (2726 samples) + min=2418, max=13723 +RPS percentiles 
(requests) runtime 30 (s) (31 total samples) + 20.0th: 10096 (28 samples) + * 50.0th: 10096 (0 samples) + 90.0th: 10096 (0 samples) + min=9895, max=10116 +average rps: 10096 +message_threads 1 +worker_threads 48 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/56.txt b/paper_results/schbench/linux_cfs/56.txt new file mode 100644 index 0000000..426564b --- /dev/null +++ b/paper_results/schbench/linux_cfs/56.txt @@ -0,0 +1,72 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101098 total samples) [33/52] + 50.0th: 1266 (26616 samples) + 90.0th: 2348 (41325 samples) + * 99.0th: 4680 (8880 samples) + 99.9th: 5224 (238 samples) + min=1, max=9047 +Request Latencies percentiles (usec) runtime 10 (s) (101666 total samples) + 50.0th: 4648 (33266 samples) + 90.0th: 5592 (37764 samples) + * 99.0th: 7144 (9332 samples) + 99.9th: 9424 (718 samples) + min=2425, max=18666 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10128 (3 samples) + * 50.0th: 10160 (6 samples) + 90.0th: 10192 (2 samples) + min=10129, max=10191 +current rps: 10130 +Wakeup Latencies percentiles (usec) runtime 20 (s) (202144 total samples) + 50.0th: 1278 (52808 samples) + 90.0th: 2348 (82265 samples) + * 99.0th: 4680 (18112 samples) + 99.9th: 4952 (494 samples) + min=1, max=9329 +Request Latencies percentiles (usec) runtime 20 (s) (203344 total samples) + 50.0th: 4648 (64642 samples) + 90.0th: 5512 (77709 samples) + * 99.0th: 7144 (18695 samples) + 99.9th: 9424 (1437 samples) + min=2420, max=18666 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10160 (16 samples) + * 50.0th: 10160 (0 samples) + 90.0th: 10192 (5 samples) + min=10129, max=10191 +current rps: 10159 +Wakeup Latencies percentiles (usec) runtime 30 (s) (303197 total samples) + 50.0th: 1286 (78941 samples) + 90.0th: 2348 (123098 samples) + * 99.0th: 4680 (27368 samples) + 99.9th: 5384 (817 samples) + min=1, max=9329 +Request Latencies percentiles (usec) runtime 30 (s) (304993 total samples) + 50.0th: 4648 (94471 samples) + 90.0th: 5464 (117594 samples) + * 99.0th: 7144 (27974 samples) + 99.9th: 9424 (2245 samples) + min=2420, max=18666 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10160 (24 samples) + * 50.0th: 10160 (0 samples) + 90.0th: 10192 (7 samples) + min=10129, max=10191 +current rps: 10159 +Wakeup Latencies percentiles (usec) runtime 30 (s) (303213 total samples) + 50.0th: 1286 (78942 samples) + 90.0th: 2348 (123104 samples) + * 99.0th: 4680 (27377 samples) + 99.9th: 5384 (817 samples) + min=1, max=9329 +Request Latencies percentiles (usec) runtime 30 (s) (305052 total samples) + 50.0th: 4648 (94491 samples) + 90.0th: 5464 (117615 samples) + * 99.0th: 7144 (27980 samples) + 99.9th: 9424 (2246 samples) + min=2420, max=18666 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10160 (24 samples) + * 50.0th: 10160 (0 samples) + 90.0th: 10192 (7 samples) + min=10129, max=10191 +average rps: 10168 diff --git a/paper_results/schbench/linux_cfs/64.txt b/paper_results/schbench/linux_cfs/64.txt new file mode 100644 index 0000000..414b44a --- /dev/null +++ b/paper_results/schbench/linux_cfs/64.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (100768 total samples) + 50.0th: 2324 (31660 samples) + 90.0th: 4664 (39441 samples) + * 99.0th: 4696 (8512 samples) + 99.9th: 6968 (382 samples) + min=1, max=9409 +Request Latencies percentiles (usec) runtime 10 (s) (101273 total samples) + 50.0th: 4680 (40669 samples) + 90.0th: 7016 
(31156 samples) + * 99.0th: 7480 (7964 samples) + 99.9th: 11472 (912 samples) + min=2427, max=25808 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10096 (3 samples) + * 50.0th: 10160 (8 samples) + 90.0th: 10160 (0 samples) + min=9924, max=10161 +current rps: 10161 +Wakeup Latencies percentiles (usec) runtime 20 (s) (201781 total samples) + 50.0th: 2324 (60189 samples) + 90.0th: 4664 (79511 samples) + * 99.0th: 4696 (18022 samples) + 99.9th: 5288 (774 samples) + min=1, max=9409 +Request Latencies percentiles (usec) runtime 20 (s) (202818 total samples) + 50.0th: 4680 (82712 samples) + 90.0th: 7016 (62105 samples) + * 99.0th: 7336 (15377 samples) + 99.9th: 10672 (1793 samples) + min=2424, max=25808 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10160 (21 samples) + * 50.0th: 10160 (0 samples) + 90.0th: 10160 (0 samples) + min=9924, max=10161 +current rps: 10158 +Wakeup Latencies percentiles (usec) runtime 30 (s) (302819 total samples) + 50.0th: 2324 (88947 samples) + 90.0th: 4664 (119560 samples) + * 99.0th: 4680 (25672 samples) + 99.9th: 5288 (2581 samples) + min=1, max=9409 +Request Latencies percentiles (usec) runtime 30 (s) (304356 total samples) + 50.0th: 4680 (124524 samples) + 90.0th: 7016 (92823 samples) + * 99.0th: 7304 (22429 samples) + 99.9th: 10352 (2730 samples) + min=2424, max=25808 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10160 (31 samples) + * 50.0th: 10160 (0 samples) + 90.0th: 10160 (0 samples) + min=9924, max=10161 +current rps: 10149 +Wakeup Latencies percentiles (usec) runtime 30 (s) (302840 total samples) + 50.0th: 2324 (88949 samples) + 90.0th: 4664 (119576 samples) + * 99.0th: 4680 (25673 samples) + 99.9th: 5288 (2582 samples) + min=1, max=9409 +Request Latencies percentiles (usec) runtime 30 (s) (304422 total samples) + 50.0th: 4680 (124548 samples) + 90.0th: 7016 (92850 samples) + * 99.0th: 7304 (22434 samples) + 99.9th: 10352 (2733 samples) + min=2424, max=25808 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10160 (31 samples) + * 50.0th: 10160 (0 samples) + 90.0th: 10160 (0 samples) + min=9924, max=10161 +average rps: 10147 +message_threads 1 +worker_threads 64 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/72.txt b/paper_results/schbench/linux_cfs/72.txt new file mode 100644 index 0000000..28cf405 --- /dev/null +++ b/paper_results/schbench/linux_cfs/72.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101082 total samples) + 50.0th: 2332 (27051 samples) + 90.0th: 4680 (43183 samples) + * 99.0th: 4696 (535 samples) + 99.9th: 7016 (876 samples) + min=1, max=14945 +Request Latencies percentiles (usec) runtime 10 (s) (101467 total samples) + 50.0th: 4680 (34827 samples) + 90.0th: 7032 (36721 samples) + * 99.0th: 8528 (8526 samples) + 99.9th: 13712 (921 samples) + min=2426, max=40000 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10096 (3 samples) + * 50.0th: 10160 (6 samples) + 90.0th: 10192 (2 samples) + min=9953, max=10181 +current rps: 10173 +Wakeup Latencies percentiles (usec) runtime 20 (s) (202408 total samples) + 50.0th: 2332 (52904 samples) + 90.0th: 4680 (87665 samples) + * 99.0th: 4696 (991 samples) + 99.9th: 7016 (1567 samples) + min=1, max=14945 +Request Latencies percentiles (usec) runtime 20 (s) (203218 total samples) + 50.0th: 4680 (70049 samples) + 90.0th: 7032 (73809 samples) + * 99.0th: 7992 (16570 samples) + 99.9th: 11120 (1828 samples) + min=2426, max=40000 +RPS 
percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10160 (16 samples) + * 50.0th: 10160 (0 samples) + 90.0th: 10192 (5 samples) + min=9953, max=10181 +current rps: 10168 +Wakeup Latencies percentiles (usec) runtime 30 (s) (303751 total samples) + 50.0th: 2332 (76637 samples) + 90.0th: 4680 (134287 samples) + * 99.0th: 4696 (1440 samples) + 99.9th: 7016 (2209 samples) + min=1, max=14945 +Request Latencies percentiles (usec) runtime 30 (s) (304948 total samples) + 50.0th: 4680 (104788 samples) + 90.0th: 7032 (111125 samples) + * 99.0th: 7848 (24518 samples) + 99.9th: 10352 (2740 samples) + min=2426, max=40000 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10160 (24 samples) + * 50.0th: 10160 (0 samples) + 90.0th: 10192 (7 samples) + min=9953, max=10181 +current rps: 10173 +Wakeup Latencies percentiles (usec) runtime 30 (s) (303774 total samples) + 50.0th: 2332 (76638 samples) + 90.0th: 4680 (134303 samples) + * 99.0th: 4696 (1442 samples) + 99.9th: 7016 (2212 samples) + min=1, max=14945 +Request Latencies percentiles (usec) runtime 30 (s) (305022 total samples) + 50.0th: 4680 (104809 samples) + 90.0th: 7032 (111154 samples) + * 99.0th: 7848 (24531 samples) + 99.9th: 10352 (2740 samples) + min=2426, max=40000 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10160 (24 samples) + * 50.0th: 10160 (0 samples) + 90.0th: 10192 (7 samples) + min=9953, max=10181 +average rps: 10167 +message_threads 1 +worker_threads 72 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/8.txt b/paper_results/schbench/linux_cfs/8.txt new file mode 100644 index 0000000..b2d5aa0 --- /dev/null +++ b/paper_results/schbench/linux_cfs/8.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (31995 total samples) + 50.0th: 6 (20021 samples) + 90.0th: 7 (4015 samples) + * 99.0th: 8 (485 samples) + 99.9th: 10 (104 samples) + min=1, max=60 +Request Latencies percentiles (usec) runtime 10 (s) (32037 total samples) + 50.0th: 2484 (15517 samples) + 90.0th: 2492 (7005 samples) + * 99.0th: 2516 (954 samples) + 99.9th: 2676 (278 samples) + min=2419, max=5754 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 3204 (10 samples) + * 50.0th: 3204 (0 samples) + 90.0th: 3204 (0 samples) + min=3189, max=3211 +current rps: 3205 +Wakeup Latencies percentiles (usec) runtime 20 (s) (64003 total samples) + 50.0th: 6 (40837 samples) + 90.0th: 7 (8816 samples) + * 99.0th: 8 (1188 samples) + 99.9th: 9 (159 samples) + min=1, max=60 +Request Latencies percentiles (usec) runtime 20 (s) (64090 total samples) + 50.0th: 2484 (32518 samples) + 90.0th: 2492 (13570 samples) + * 99.0th: 2516 (1802 samples) + 99.9th: 2588 (459 samples) + min=2419, max=5754 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 3204 (19 samples) + * 50.0th: 3204 (0 samples) + 90.0th: 3204 (0 samples) + min=3189, max=3211 +current rps: 3207 +Wakeup Latencies percentiles (usec) runtime 30 (s) (96016 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (13620 samples) + * 99.0th: 8 (1719 samples) + 99.9th: 9 (231 samples) + min=1, max=60 +Request Latencies percentiles (usec) runtime 30 (s) (96162 total samples) + 50.0th: 2484 (47279 samples) + 90.0th: 2492 (18539 samples) + * 99.0th: 2516 (2741 samples) + 99.9th: 2556 (712 samples) + min=2419, max=5754 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 3204 (25 samples) + * 50.0th: 3204 (0 samples) + 90.0th: 3212 (6 samples) + min=3189, max=3211 +current rps: 3206 
+Wakeup Latencies percentiles (usec) runtime 30 (s) (96016 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (13620 samples) + * 99.0th: 8 (1719 samples) + 99.9th: 9 (231 samples) + min=1, max=60 +Request Latencies percentiles (usec) runtime 30 (s) (96170 total samples) + 50.0th: 2484 (47284 samples) + 90.0th: 2492 (18541 samples) + * 99.0th: 2516 (2741 samples) + 99.9th: 2556 (712 samples) + min=2419, max=5754 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 3204 (25 samples) + * 50.0th: 3204 (0 samples) + 90.0th: 3212 (6 samples) + min=3189, max=3211 +average rps: 3206 +message_threads 1 +worker_threads 8 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/80.txt b/paper_results/schbench/linux_cfs/80.txt new file mode 100644 index 0000000..58deb0a --- /dev/null +++ b/paper_results/schbench/linux_cfs/80.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101156 total samples) + 50.0th: 2340 (40595 samples) + 90.0th: 4680 (29454 samples) + * 99.0th: 7016 (7406 samples) + 99.9th: 9328 (408 samples) + min=1, max=13858 +Request Latencies percentiles (usec) runtime 10 (s) (101533 total samples) + 50.0th: 4696 (30946 samples) + 90.0th: 7256 (40101 samples) + * 99.0th: 9552 (9118 samples) + 99.9th: 14032 (874 samples) + min=2425, max=37752 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10128 (3 samples) + * 50.0th: 10192 (8 samples) + 90.0th: 10192 (0 samples) + min=9949, max=10187 +current rps: 10186 +Wakeup Latencies percentiles (usec) runtime 20 (s) (202594 total samples) + 50.0th: 2340 (80670 samples) + 90.0th: 4680 (59466 samples) + * 99.0th: 7016 (14742 samples) + 99.9th: 9296 (650 samples) + min=1, max=13858 +Request Latencies percentiles (usec) runtime 20 (s) (203400 total samples) + 50.0th: 4696 (62650 samples) + 90.0th: 7240 (79830 samples) + * 99.0th: 9520 (18237 samples) + 99.9th: 13680 (1704 samples) + min=2424, max=37752 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10192 (21 samples) + * 50.0th: 10192 (0 samples) + 90.0th: 10192 (0 samples) + min=9949, max=10190 +current rps: 10181 +Wakeup Latencies percentiles (usec) runtime 30 (s) (304019 total samples) + 50.0th: 2340 (121036 samples) + 90.0th: 4680 (89248 samples) + * 99.0th: 7016 (21996 samples) + 99.9th: 9296 (953 samples) + min=1, max=14066 +Request Latencies percentiles (usec) runtime 30 (s) (305212 total samples) + 50.0th: 4696 (94257 samples) + 90.0th: 7240 (119849 samples) + * 99.0th: 9520 (27342 samples) + 99.9th: 12144 (2413 samples) + min=2424, max=37752 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10192 (31 samples) + * 50.0th: 10192 (0 samples) + 90.0th: 10192 (0 samples) + min=9949, max=10195 +current rps: 10135 +Wakeup Latencies percentiles (usec) runtime 30 (s) (304042 total samples) + 50.0th: 2340 (121041 samples) + 90.0th: 4680 (89256 samples) + * 99.0th: 7016 (22003 samples) + 99.9th: 9296 (955 samples) + min=1, max=14066 +Request Latencies percentiles (usec) runtime 30 (s) (305294 total samples) + 50.0th: 4696 (94275 samples) + 90.0th: 7240 (119891 samples) + * 99.0th: 9520 (27349 samples) + 99.9th: 12144 (2416 samples) + min=2424, max=37752 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10192 (31 samples) + * 50.0th: 10192 (0 samples) + 90.0th: 10192 (0 samples) + min=9949, max=10195 +average rps: 10176 +message_threads 1 +worker_threads 80 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/88.txt 
b/paper_results/schbench/linux_cfs/88.txt new file mode 100644 index 0000000..2b5bbbc --- /dev/null +++ b/paper_results/schbench/linux_cfs/88.txt @@ -0,0 +1,72 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101142 total samples) + 50.0th: 2340 (29424 samples) + 90.0th: 6984 (36588 samples) + * 99.0th: 7032 (7374 samples) + 99.9th: 9360 (703 samples) + min=1, max=18343 +Request Latencies percentiles (usec) runtime 10 (s) (101503 total samples) + 50.0th: 4808 (31245 samples) + 90.0th: 9328 (40529 samples) + * 99.0th: 10928 (8409 samples) + 99.9th: 13552 (914 samples) + min=2433, max=45813 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10160 (4 samples) + * 50.0th: 10192 (6 samples) + 90.0th: 10192 (0 samples) + min=9781, max=10210 +current rps: 10191 +Wakeup Latencies percentiles (usec) runtime 20 (s) (201643 total samples) + 50.0th: 2340 (58450 samples) + 90.0th: 6984 (72371 samples) + * 99.0th: 7032 (14996 samples) + 99.9th: 9456 (1607 samples) + min=1, max=18343 +Request Latencies percentiles (usec) runtime 20 (s) (202363 total samples) + 50.0th: 4808 (61140 samples) + 90.0th: 9328 (81364 samples) + * 99.0th: 10928 (17370 samples) + 99.9th: 14192 (1820 samples) + min=2426, max=45813 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10064 (5 samples) + * 50.0th: 10192 (15 samples) + 90.0th: 10192 (0 samples) + min=9677, max=10210 +current rps: 10194 +qWakeup Latencies percentiles (usec) runtime 30 (s) (303186 total samples) + 50.0th: 2340 (88027 samples) + 90.0th: 6984 (107925 samples) + * 99.0th: 7032 (22903 samples) + 99.9th: 9360 (2073 samples) + min=1, max=18343 +Request Latencies percentiles (usec) runtime 30 (s) (304297 total samples) + 50.0th: 4808 (92203 samples) + 90.0th: 9328 (122257 samples) + * 99.0th: 10160 (25826 samples) + 99.9th: 13744 (2738 samples) + min=2426, max=45813 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10160 (8 samples) + * 50.0th: 10192 (22 samples) + 90.0th: 10192 (0 samples) + min=9677, max=10210 +current rps: 10193 +Wakeup Latencies percentiles (usec) runtime 30 (s) (303220 total samples) + 50.0th: 2340 (88029 samples) + 90.0th: 6984 (107951 samples) + * 99.0th: 7032 (22907 samples) + 99.9th: 9360 (2074 samples) + min=1, max=18343 +Request Latencies percentiles (usec) runtime 30 (s) (304389 total samples) + 50.0th: 4808 (92238 samples) + 90.0th: 9328 (122287 samples) + * 99.0th: 10160 (25837 samples) + 99.9th: 13744 (2738 samples) + min=2426, max=45813 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10160 (8 samples) + * 50.0th: 10192 (22 samples) + 90.0th: 10192 (0 samples) + min=9677, max=10210 +average rps: 10146 diff --git a/paper_results/schbench/linux_cfs/96.txt b/paper_results/schbench/linux_cfs/96.txt new file mode 100644 index 0000000..7c25c93 --- /dev/null +++ b/paper_results/schbench/linux_cfs/96.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101421 total samples) + 50.0th: 2340 (26322 samples) + 90.0th: 7000 (41711 samples) + * 99.0th: 7048 (7814 samples) + 99.9th: 9616 (887 samples) + min=1, max=18271 +Request Latencies percentiles (usec) runtime 10 (s) (101706 total samples) + 50.0th: 5992 (30528 samples) + 90.0th: 9360 (44060 samples) + * 99.0th: 11696 (6058 samples) + 99.9th: 18464 (614 samples) + min=2422, max=58149 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10128 (3 samples) + * 50.0th: 10192 (7 samples) + 90.0th: 10192 (0 samples) + min=9950, max=10210 +current rps: 10205 
+Wakeup Latencies percentiles (usec) runtime 20 (s) (203024 total samples) + 50.0th: 2348 (57544 samples) + 90.0th: 7000 (77026 samples) + * 99.0th: 7048 (17123 samples) + 99.9th: 9360 (1605 samples) + min=1, max=18271 +Request Latencies percentiles (usec) runtime 20 (s) (203718 total samples) + 50.0th: 5992 (61073 samples) + 90.0th: 9360 (88631 samples) + * 99.0th: 11664 (11355 samples) + 99.9th: 15248 (1642 samples) + min=2422, max=58149 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10192 (19 samples) + * 50.0th: 10192 (0 samples) + 90.0th: 10192 (0 samples) + min=9950, max=10214 +current rps: 10204 +Wakeup Latencies percentiles (usec) runtime 30 (s) (304733 total samples) + 50.0th: 2348 (85913 samples) + 90.0th: 7000 (114780 samples) + * 99.0th: 7048 (26823 samples) + 99.9th: 9360 (2395 samples) + min=1, max=18271 +Request Latencies percentiles (usec) runtime 30 (s) (305766 total samples) + 50.0th: 5992 (91702 samples) + 90.0th: 9360 (133167 samples) + * 99.0th: 11632 (16669 samples) + 99.9th: 14160 (2741 samples) + min=2422, max=58149 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10192 (27 samples) + * 50.0th: 10192 (0 samples) + 90.0th: 10224 (4 samples) + min=9950, max=10214 +current rps: 10195 +Wakeup Latencies percentiles (usec) runtime 30 (s) (304771 total samples) + 50.0th: 2348 (85916 samples) + 90.0th: 7000 (114802 samples) + * 99.0th: 7048 (26833 samples) + 99.9th: 9360 (2398 samples) + min=1, max=18271 +Request Latencies percentiles (usec) runtime 30 (s) (305863 total samples) + 50.0th: 6008 (91824 samples) + 90.0th: 9360 (133116 samples) + * 99.0th: 11632 (16679 samples) + 99.9th: 14160 (2741 samples) + min=2422, max=58149 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10192 (27 samples) + * 50.0th: 10192 (0 samples) + 90.0th: 10224 (4 samples) + min=9950, max=10214 +average rps: 10195 +message_threads 1 +worker_threads 96 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_cfs/all.csv b/paper_results/schbench/linux_cfs/all.csv new file mode 100644 index 0000000..7b1a5e7 --- /dev/null +++ b/paper_results/schbench/linux_cfs/all.csv @@ -0,0 +1,24 @@ +cores,wake99,rps50,lat99 +4,20,1602,2524 +8,8,3204,2516 +16,8,6408,2508 +24,17,9584,2532 +32,2340,9936,4792 +40,2348,10032,4840 +48,2372,10096,6520 +56,4680,10160,7144 +64,4680,10160,7304 +72,4696,10160,7848 +80,7016,10192,9520 +88,7032,10192,10160 +96,7048,10192,11632 +112,9360,10224,12048 +128,11696,10224,14160 +144,11760,10192,16272 +160,14032,10224,16928 +176,16368,10224,18848 +192,16416,10224,19872 +208,18784,10192,21344 +224,21024,10224,23456 +240,23328,10224,25568 +256,25568,10256,26912 diff --git a/paper_results/schbench/linux_fifo/112.txt b/paper_results/schbench/linux_fifo/112.txt new file mode 100644 index 0000000..83df83f --- /dev/null +++ b/paper_results/schbench/linux_fifo/112.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96484 total samples) + 50.0th: 1394 (28982 samples) + 90.0th: 6760 (38556 samples) + * 99.0th: 15024 (8675 samples) + 99.9th: 56256 (870 samples) + min=1, max=69331 +Request Latencies percentiles (usec) runtime 10 (s) (96421 total samples) + 50.0th: 4760 (29210 samples) + 90.0th: 10032 (38214 samples) + * 99.0th: 21280 (8700 samples) + 99.9th: 59712 (837 samples) + min=2422, max=75682 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9616 (4 samples) + * 50.0th: 9648 (7 samples) + 90.0th: 9648 (0 samples) + min=9571, max=9644 +current rps: 9638 +Wakeup 
Latencies percentiles (usec) runtime 20 (s) (193161 total samples) + 50.0th: 1378 (57971 samples) + 90.0th: 6792 (77275 samples) + * 99.0th: 15056 (17337 samples) + 99.9th: 56384 (1729 samples) + min=1, max=73548 +Request Latencies percentiles (usec) runtime 20 (s) (193134 total samples) + 50.0th: 4760 (58621 samples) + 90.0th: 10064 (76488 samples) + * 99.0th: 21280 (17371 samples) + 99.9th: 60480 (1731 samples) + min=2422, max=76104 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9648 (21 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9571, max=9644 +current rps: 9635 +Wakeup Latencies percentiles (usec) runtime 30 (s) (289813 total samples) + 50.0th: 1374 (87115 samples) + 90.0th: 6792 (115728 samples) + * 99.0th: 15312 (26079 samples) + 99.9th: 56640 (2623 samples) + min=1, max=73885 +Request Latencies percentiles (usec) runtime 30 (s) (289795 total samples) + 50.0th: 4760 (88053 samples) + 90.0th: 10096 (114655 samples) + * 99.0th: 21280 (26065 samples) + 99.9th: 60736 (2582 samples) + min=2421, max=80102 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (31 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9571, max=9644 +current rps: 9639 +Wakeup Latencies percentiles (usec) runtime 30 (s) (289865 total samples) + 50.0th: 1374 (87123 samples) + 90.0th: 6792 (115755 samples) + * 99.0th: 15312 (26087 samples) + 99.9th: 56640 (2623 samples) + min=1, max=73885 +Request Latencies percentiles (usec) runtime 30 (s) (289909 total samples) + 50.0th: 4760 (88081 samples) + 90.0th: 10096 (114705 samples) + * 99.0th: 21280 (26078 samples) + 99.9th: 60736 (2582 samples) + min=2421, max=80102 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (31 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9571, max=9644 +average rps: 9664 +message_threads 1 +worker_threads 112 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/128.txt b/paper_results/schbench/linux_fifo/128.txt new file mode 100644 index 0000000..71371c5 --- /dev/null +++ b/paper_results/schbench/linux_fifo/128.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96544 total samples) + 50.0th: 1606 (29014 samples) + 90.0th: 8168 (38578 samples) + * 99.0th: 18400 (8681 samples) + 99.9th: 59968 (860 samples) + min=1, max=77793 +Request Latencies percentiles (usec) runtime 10 (s) (96515 total samples) + 50.0th: 5480 (28912 samples) + 90.0th: 11792 (38847 samples) + * 99.0th: 23648 (8493 samples) + 99.9th: 63808 (827 samples) + min=2420, max=79460 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9648 (11 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9579, max=9648 +current rps: 9635 +Wakeup Latencies percentiles (usec) runtime 20 (s) (193173 total samples) + 50.0th: 1618 (57988 samples) + 90.0th: 8168 (77266 samples) + * 99.0th: 18336 (17364 samples) + 99.9th: 59328 (1726 samples) + min=1, max=80171 +Request Latencies percentiles (usec) runtime 20 (s) (193124 total samples) + 50.0th: 5464 (58018 samples) + 90.0th: 11792 (77958 samples) + * 99.0th: 23648 (16584 samples) + 99.9th: 63808 (1744 samples) + min=2420, max=80343 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9648 (21 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9579, max=9653 +current rps: 9651 +Wakeup Latencies percentiles (usec) runtime 30 (s) (289865 total samples) + 50.0th: 1610 (86896 samples) + 
90.0th: 8208 (116000 samples) + * 99.0th: 18272 (26047 samples) + 99.9th: 59200 (2580 samples) + min=1, max=83429 +Request Latencies percentiles (usec) runtime 30 (s) (289818 total samples) + 50.0th: 5432 (86885 samples) + 90.0th: 11792 (116692 samples) + * 99.0th: 23712 (25288 samples) + 99.9th: 63808 (2588 samples) + min=2420, max=89381 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (31 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9579, max=9653 +current rps: 9646 +Wakeup Latencies percentiles (usec) runtime 30 (s) (289916 total samples) + 50.0th: 1610 (86898 samples) + 90.0th: 8208 (116033 samples) + * 99.0th: 18272 (26057 samples) + 99.9th: 59200 (2580 samples) + min=1, max=83429 +Request Latencies percentiles (usec) runtime 30 (s) (289950 total samples) + 50.0th: 5432 (86916 samples) + 90.0th: 11792 (116762 samples) + * 99.0th: 23712 (25298 samples) + 99.9th: 63808 (2588 samples) + min=2420, max=89381 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (31 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9579, max=9653 +average rps: 9665 +message_threads 1 +worker_threads 128 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/144.txt b/paper_results/schbench/linux_fifo/144.txt new file mode 100644 index 0000000..e0084fb --- /dev/null +++ b/paper_results/schbench/linux_fifo/144.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96945 total samples) + 50.0th: 1950 (29152 samples) + 90.0th: 9520 (38732 samples) + * 99.0th: 20320 (8708 samples) + 99.9th: 62656 (869 samples) + min=1, max=90535 +Request Latencies percentiles (usec) runtime 10 (s) (96885 total samples) + 50.0th: 7032 (29831 samples) + 90.0th: 13392 (37966 samples) + * 99.0th: 26016 (8712 samples) + 99.9th: 64448 (869 samples) + min=2422, max=82494 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9616 (3 samples) + * 50.0th: 9648 (8 samples) + 90.0th: 9648 (0 samples) + min=9609, max=9657 +current rps: 9648 +Wakeup Latencies percentiles (usec) runtime 20 (s) (193691 total samples) + 50.0th: 1890 (58110 samples) + 90.0th: 9488 (77454 samples) + * 99.0th: 21024 (17434 samples) + 99.9th: 61888 (1711 samples) + min=1, max=90535 +Request Latencies percentiles (usec) runtime 20 (s) (193611 total samples) + 50.0th: 7032 (59846 samples) + 90.0th: 13456 (75622 samples) + * 99.0th: 26208 (17394 samples) + 99.9th: 64576 (1746 samples) + min=2422, max=82494 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9648 (21 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9609, max=9658 +current rps: 9643 +Wakeup Latencies percentiles (usec) runtime 30 (s) (290484 total samples) + 50.0th: 1886 (87258 samples) + 90.0th: 9520 (116215 samples) + * 99.0th: 21152 (26021 samples) + 99.9th: 61888 (2575 samples) + min=1, max=90535 +Request Latencies percentiles (usec) runtime 30 (s) (290427 total samples) + 50.0th: 7032 (90006 samples) + 90.0th: 13552 (113182 samples) + * 99.0th: 27168 (26078 samples) + 99.9th: 65216 (2612 samples) + min=2422, max=83602 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (31 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9609, max=9660 +current rps: 9646 +Wakeup Latencies percentiles (usec) runtime 30 (s) (290544 total samples) + 50.0th: 1886 (87264 samples) + 90.0th: 9520 (116255 samples) + * 99.0th: 21152 (26030 samples) + 99.9th: 61888 (2575 samples) + 
min=1, max=90535 +Request Latencies percentiles (usec) runtime 30 (s) (290575 total samples) + 50.0th: 7032 (90028 samples) + 90.0th: 13552 (113243 samples) + * 99.0th: 27104 (26096 samples) + 99.9th: 65216 (2616 samples) + min=2422, max=83602 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (31 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9609, max=9660 +average rps: 9686 +message_threads 1 +worker_threads 144 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/16.txt b/paper_results/schbench/linux_fifo/16.txt new file mode 100644 index 0000000..5b5455f --- /dev/null +++ b/paper_results/schbench/linux_fifo/16.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (64895 total samples) + 50.0th: 6 (36102 samples) + 90.0th: 7 (10865 samples) + * 99.0th: 9 (3227 samples) + 99.9th: 28 (439 samples) + min=1, max=7249 +Request Latencies percentiles (usec) runtime 10 (s) (65090 total samples) + 50.0th: 2436 (36703 samples) + 90.0th: 2444 (7187 samples) + * 99.0th: 2460 (681 samples) + 99.9th: 7992 (485 samples) + min=2416, max=8607 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 6504 (9 samples) + * 50.0th: 6504 (0 samples) + 90.0th: 6520 (1 samples) + min=6502, max=6534 +current rps: 6513 +Wakeup Latencies percentiles (usec) runtime 20 (s) (129756 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (23592 samples) + * 99.0th: 9 (7436 samples) + 99.9th: 3212 (967 samples) + min=1, max=7932 +Request Latencies percentiles (usec) runtime 20 (s) (130157 total samples) + 50.0th: 2436 (75998 samples) + 90.0th: 2444 (14109 samples) + * 99.0th: 2452 (747 samples) + 99.9th: 7992 (1087 samples) + min=2416, max=8607 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 6504 (19 samples) + * 50.0th: 6504 (0 samples) + 90.0th: 6504 (0 samples) + min=6502, max=6534 +current rps: 6510 +Wakeup Latencies percentiles (usec) runtime 30 (s) (194636 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (37092 samples) + * 99.0th: 9 (11900 samples) + 99.9th: 3468 (1462 samples) + min=1, max=7932 +Request Latencies percentiles (usec) runtime 30 (s) (195232 total samples) + 50.0th: 2436 (114098 samples) + 90.0th: 2444 (20725 samples) + * 99.0th: 2452 (943 samples) + 99.9th: 7992 (1400 samples) + min=2416, max=10250 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 6504 (28 samples) + * 50.0th: 6504 (0 samples) + 90.0th: 6504 (0 samples) + min=6499, max=6534 +current rps: 6509 +Wakeup Latencies percentiles (usec) runtime 30 (s) (194636 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (37092 samples) + * 99.0th: 9 (11900 samples) + 99.9th: 3468 (1462 samples) + min=1, max=7932 +Request Latencies percentiles (usec) runtime 30 (s) (195248 total samples) + 50.0th: 2436 (114104 samples) + 90.0th: 2444 (20729 samples) + * 99.0th: 2452 (943 samples) + 99.9th: 7992 (1401 samples) + min=2416, max=10250 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 6504 (28 samples) + * 50.0th: 6504 (0 samples) + 90.0th: 6504 (0 samples) + min=6499, max=6534 +average rps: 6508 +message_threads 1 +worker_threads 16 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/160.txt b/paper_results/schbench/linux_fifo/160.txt new file mode 100644 index 0000000..45b9c78 --- /dev/null +++ b/paper_results/schbench/linux_fifo/160.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97069 total samples) + 50.0th: 2116 (29165 
samples) + 90.0th: 10960 (38776 samples) + * 99.0th: 23072 (8732 samples) + 99.9th: 63680 (862 samples) + min=1, max=88962 +Request Latencies percentiles (usec) runtime 10 (s) (97013 total samples) + 50.0th: 7064 (28090 samples) + 90.0th: 14160 (37839 samples) + * 99.0th: 29344 (8052 samples) + 99.9th: 67200 (873 samples) + min=2420, max=91936 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9648 (10 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9590, max=9680 +current rps: 9659 +Wakeup Latencies percentiles (usec) runtime 20 (s) (193804 total samples) + 50.0th: 2092 (58232 samples) + 90.0th: 11088 (77412 samples) + * 99.0th: 23264 (17381 samples) + 99.9th: 63680 (1740 samples) + min=1, max=88962 +Request Latencies percentiles (usec) runtime 20 (s) (193766 total samples) + 50.0th: 7064 (61240 samples) + 90.0th: 14160 (74887 samples) + * 99.0th: 30496 (16896 samples) + 99.9th: 67200 (1740 samples) + min=2420, max=91936 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9648 (19 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9197, max=9680 +current rps: 9664 +Wakeup Latencies percentiles (usec) runtime 30 (s) (290821 total samples) + 50.0th: 2108 (87615 samples) + 90.0th: 11120 (115948 samples) + * 99.0th: 23200 (26181 samples) + 99.9th: 64832 (2615 samples) + min=1, max=88962 +Request Latencies percentiles (usec) runtime 30 (s) (290816 total samples) + 50.0th: 7064 (91808 samples) + 90.0th: 14160 (112201 samples) + * 99.0th: 28832 (25467 samples) + 99.9th: 68480 (2634 samples) + min=2420, max=91936 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (28 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9197, max=9680 +current rps: 9658 +Wakeup Latencies percentiles (usec) runtime 30 (s) (290957 total samples) + 50.0th: 2100 (87257 samples) + 90.0th: 11120 (116375 samples) + * 99.0th: 23200 (26184 samples) + 99.9th: 64832 (2615 samples) + min=1, max=88962 +Request Latencies percentiles (usec) runtime 30 (s) (290985 total samples) + 50.0th: 7064 (91846 samples) + 90.0th: 14160 (112286 samples) + * 99.0th: 28768 (25479 samples) + 99.9th: 68480 (2636 samples) + min=2420, max=91936 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (28 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9197, max=9680 +average rps: 9700 +message_threads 1 +worker_threads 160 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/176.txt b/paper_results/schbench/linux_fifo/176.txt new file mode 100644 index 0000000..ec5e99c --- /dev/null +++ b/paper_results/schbench/linux_fifo/176.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96837 total samples) + 50.0th: 2308 (29057 samples) + 90.0th: 12592 (38767 samples) + * 99.0th: 25056 (8672 samples) + 99.9th: 66688 (867 samples) + min=1, max=84953 +Request Latencies percentiles (usec) runtime 10 (s) (96783 total samples) + 50.0th: 7080 (28558 samples) + 90.0th: 16416 (39154 samples) + * 99.0th: 30816 (8260 samples) + 99.9th: 68992 (870 samples) + min=2426, max=89730 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9648 (9 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9680 (2 samples) + min=9582, max=9675 +current rps: 9660 +Wakeup Latencies percentiles (usec) runtime 20 (s) (193839 total samples) + 50.0th: 2300 (58394 samples) + 90.0th: 12368 (77302 samples) + * 99.0th: 25184 (17430 samples) + 99.9th: 66176 (1748 
samples) + min=1, max=84953 +Request Latencies percentiles (usec) runtime 20 (s) (193769 total samples) + 50.0th: 7080 (57267 samples) + 90.0th: 16416 (78433 samples) + * 99.0th: 32544 (16256 samples) + 99.9th: 68992 (1745 samples) + min=2425, max=89962 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9648 (15 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9680 (6 samples) + min=9582, max=9675 +current rps: 9663 +Wakeup Latencies percentiles (usec) runtime 30 (s) (290898 total samples) + 50.0th: 2300 (87585 samples) + 90.0th: 12368 (116105 samples) + * 99.0th: 25248 (26095 samples) + 99.9th: 66176 (2611 samples) + min=1, max=84953 +Request Latencies percentiles (usec) runtime 30 (s) (290836 total samples) + 50.0th: 7080 (86087 samples) + 90.0th: 16416 (117469 samples) + * 99.0th: 32832 (24483 samples) + 99.9th: 68992 (2577 samples) + min=2424, max=89962 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (17 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9680 (14 samples) + min=9582, max=9675 +current rps: 9668 +Wakeup Latencies percentiles (usec) runtime 30 (s) (290988 total samples) + 50.0th: 2300 (87600 samples) + 90.0th: 12368 (116149 samples) + * 99.0th: 25248 (26122 samples) + 99.9th: 66176 (2611 samples) + min=1, max=84953 +Request Latencies percentiles (usec) runtime 30 (s) (291019 total samples) + 50.0th: 7080 (86130 samples) + 90.0th: 16416 (117547 samples) + * 99.0th: 32832 (24508 samples) + 99.9th: 68992 (2577 samples) + min=2424, max=89962 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (17 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9680 (14 samples) + min=9582, max=9675 +average rps: 9701 +message_threads 1 +worker_threads 176 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/192.txt b/paper_results/schbench/linux_fifo/192.txt new file mode 100644 index 0000000..757dede --- /dev/null +++ b/paper_results/schbench/linux_fifo/192.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97377 total samples) + 50.0th: 2380 (29268 samples) + 90.0th: 13456 (38887 samples) + * 99.0th: 28000 (8756 samples) + 99.9th: 67456 (876 samples) + min=1, max=79283 +Request Latencies percentiles (usec) runtime 10 (s) (97275 total samples) + 50.0th: 7240 (26502 samples) + 90.0th: 16480 (38955 samples) + * 99.0th: 35264 (8731 samples) + 99.9th: 68736 (844 samples) + min=2424, max=83740 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9680 (11 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9604, max=9687 +current rps: 9679 +Wakeup Latencies percentiles (usec) runtime 20 (s) (195484 total samples) + 50.0th: 2388 (58613 samples) + 90.0th: 13584 (78179 samples) + * 99.0th: 27616 (17592 samples) + 99.9th: 67712 (1762 samples) + min=1, max=88075 +Request Latencies percentiles (usec) runtime 20 (s) (195385 total samples) + 50.0th: 7224 (53420 samples) + 90.0th: 16544 (78558 samples) + * 99.0th: 35264 (17125 samples) + 99.9th: 70784 (1678 samples) + min=2421, max=91856 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9680 (21 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9604, max=9689 +current rps: 9683 +Wakeup Latencies percentiles (usec) runtime 30 (s) (293038 total samples) + 50.0th: 2380 (87934 samples) + 90.0th: 13584 (117160 samples) + * 99.0th: 27616 (26350 samples) + 99.9th: 67968 (2642 samples) + min=1, max=88075 +Request Latencies percentiles (usec) runtime 30 (s) (292978 
total samples) + 50.0th: 7224 (79992 samples) + 90.0th: 16544 (118171 samples) + * 99.0th: 33472 (25380 samples) + 99.9th: 71296 (2636 samples) + min=2421, max=91856 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (31 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9604, max=9689 +current rps: 9671 +Wakeup Latencies percentiles (usec) runtime 30 (s) (293151 total samples) + 50.0th: 2380 (87953 samples) + 90.0th: 13584 (117227 samples) + * 99.0th: 27616 (26375 samples) + 99.9th: 67968 (2644 samples) + min=1, max=88075 +Request Latencies percentiles (usec) runtime 30 (s) (293175 total samples) + 50.0th: 7240 (80241 samples) + 90.0th: 16544 (118059 samples) + * 99.0th: 33472 (25402 samples) + 99.9th: 71296 (2636 samples) + min=2421, max=91856 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (31 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9604, max=9689 +average rps: 9773 +message_threads 1 +worker_threads 192 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/208.txt b/paper_results/schbench/linux_fifo/208.txt new file mode 100644 index 0000000..ef91e88 --- /dev/null +++ b/paper_results/schbench/linux_fifo/208.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97266 total samples) + 50.0th: 2700 (29149 samples) + 90.0th: 15248 (38920 samples) + * 99.0th: 30304 (8741 samples) + 99.9th: 68480 (883 samples) + min=1, max=83268 +Request Latencies percentiles (usec) runtime 10 (s) (97157 total samples) + 50.0th: 9328 (28958 samples) + 90.0th: 18784 (38923 samples) + * 99.0th: 40512 (8524 samples) + 99.9th: 70784 (874 samples) + min=2418, max=92694 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9648 (3 samples) + * 50.0th: 9680 (8 samples) + 90.0th: 9680 (0 samples) + min=9595, max=9677 +current rps: 9674 +Wakeup Latencies percentiles (usec) runtime 20 (s) (194550 total samples) + 50.0th: 2764 (58418 samples) + 90.0th: 15152 (77819 samples) + * 99.0th: 29920 (17444 samples) + 99.9th: 69760 (1763 samples) + min=1, max=87173 +Request Latencies percentiles (usec) runtime 20 (s) (194492 total samples) + 50.0th: 9328 (57912 samples) + 90.0th: 18784 (78243 samples) + * 99.0th: 41536 (16748 samples) + 99.9th: 73088 (1766 samples) + min=2418, max=92694 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9680 (21 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9595, max=9680 +current rps: 9680 +Wakeup Latencies percentiles (usec) runtime 30 (s) (291596 total samples) + 50.0th: 2684 (87578 samples) + 90.0th: 14992 (116532 samples) + * 99.0th: 30176 (26226 samples) + 99.9th: 68736 (2618 samples) + min=1, max=91988 +Request Latencies percentiles (usec) runtime 30 (s) (291520 total samples) + 50.0th: 9360 (91283 samples) + 90.0th: 18784 (114232 samples) + * 99.0th: 40000 (24244 samples) + 99.9th: 73088 (2621 samples) + min=2418, max=94441 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (31 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9595, max=9682 +current rps: 9682 +Wakeup Latencies percentiles (usec) runtime 30 (s) (291706 total samples) + 50.0th: 2684 (87583 samples) + 90.0th: 14992 (116599 samples) + * 99.0th: 30176 (26234 samples) + 99.9th: 68736 (2618 samples) + min=1, max=91988 +Request Latencies percentiles (usec) runtime 30 (s) (291734 total samples) + 50.0th: 9360 (91359 samples) + 90.0th: 18784 (114292 samples) + * 99.0th: 
40000 (24267 samples) + 99.9th: 73088 (2621 samples) + min=2418, max=94441 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (31 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9595, max=9682 +average rps: 9724 +message_threads 1 +worker_threads 208 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/224.txt b/paper_results/schbench/linux_fifo/224.txt new file mode 100644 index 0000000..571d67e --- /dev/null +++ b/paper_results/schbench/linux_fifo/224.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97611 total samples) + 50.0th: 2956 (29282 samples) + 90.0th: 16272 (39032 samples) + * 99.0th: 32736 (8774 samples) + 99.9th: 69248 (879 samples) + min=1, max=87575 +Request Latencies percentiles (usec) runtime 10 (s) (97499 total samples) + 50.0th: 9392 (29380 samples) + 90.0th: 19744 (37786 samples) + * 99.0th: 40000 (8764 samples) + 99.9th: 73088 (885 samples) + min=2425, max=92001 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9680 (11 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9613, max=9686 +current rps: 9676 +Wakeup Latencies percentiles (usec) runtime 20 (s) (195353 total samples) + 50.0th: 3068 (58586 samples) + 90.0th: 16336 (78217 samples) + * 99.0th: 32352 (17495 samples) + 99.9th: 70784 (1750 samples) + min=1, max=87575 +Request Latencies percentiles (usec) runtime 20 (s) (195245 total samples) + 50.0th: 9392 (59031 samples) + 90.0th: 19488 (75282 samples) + * 99.0th: 40384 (17554 samples) + 99.9th: 74112 (1759 samples) + min=2423, max=94457 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9680 (20 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9613, max=9696 +current rps: 9676 +Wakeup Latencies percentiles (usec) runtime 30 (s) (293185 total samples) + 50.0th: 3172 (88000 samples) + 90.0th: 16416 (117379 samples) + * 99.0th: 33216 (26221 samples) + 99.9th: 72320 (2633 samples) + min=1, max=101728 +Request Latencies percentiles (usec) runtime 30 (s) (293110 total samples) + 50.0th: 9392 (88656 samples) + 90.0th: 19872 (113041 samples) + * 99.0th: 44608 (26357 samples) + 99.9th: 75648 (2632 samples) + min=2423, max=102551 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (30 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9236, max=9696 +current rps: 9680 +Wakeup Latencies percentiles (usec) runtime 30 (s) (293318 total samples) + 50.0th: 3172 (88041 samples) + 90.0th: 16416 (117423 samples) + * 99.0th: 33216 (26225 samples) + 99.9th: 72320 (2633 samples) + min=1, max=101728 +Request Latencies percentiles (usec) runtime 30 (s) (293339 total samples) + 50.0th: 9392 (88727 samples) + 90.0th: 19872 (113144 samples) + * 99.0th: 44608 (26366 samples) + 99.9th: 75648 (2632 samples) + min=2423, max=102551 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (30 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9236, max=9696 +average rps: 9778 +message_threads 1 +worker_threads 224 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/24.txt b/paper_results/schbench/linux_fifo/24.txt new file mode 100644 index 0000000..db8cc19 --- /dev/null +++ b/paper_results/schbench/linux_fifo/24.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (95947 total samples) + 50.0th: 6 (34809 samples) + 90.0th: 10 (20455 samples) + * 99.0th: 17 (7897 samples) + 99.9th: 36 
(573 samples) + min=1, max=11689 +Request Latencies percentiles (usec) runtime 10 (s) (96965 total samples) + 50.0th: 2436 (0 samples) + 90.0th: 2452 (43308 samples) + * 99.0th: 2484 (1337 samples) + 99.9th: 4696 (693 samples) + min=2416, max=12065 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9616 (3 samples) + * 50.0th: 9712 (7 samples) + 90.0th: 9712 (0 samples) + min=9587, max=9733 +current rps: 9726 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192011 total samples) + 50.0th: 6 (70736 samples) + 90.0th: 10 (40084 samples) + * 99.0th: 17 (15201 samples) + 99.9th: 29 (1286 samples) + min=1, max=11689 +Request Latencies percentiles (usec) runtime 20 (s) (194016 total samples) + 50.0th: 2436 (0 samples) + 90.0th: 2452 (91371 samples) + * 99.0th: 2476 (2773 samples) + 99.9th: 4680 (1604 samples) + min=2416, max=12084 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9680 (5 samples) + * 50.0th: 9712 (15 samples) + 90.0th: 9712 (0 samples) + min=9587, max=9733 +current rps: 9714 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288113 total samples) + 50.0th: 6 (106330 samples) + 90.0th: 10 (59683 samples) + * 99.0th: 17 (22355 samples) + 99.9th: 28 (1848 samples) + min=1, max=11689 +Request Latencies percentiles (usec) runtime 30 (s) (291134 total samples) + 50.0th: 2436 (0 samples) + 90.0th: 2452 (138072 samples) + * 99.0th: 2476 (4481 samples) + 99.9th: 4664 (2292 samples) + min=2416, max=12084 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9712 (30 samples) + * 50.0th: 9712 (0 samples) + 90.0th: 9712 (0 samples) + min=9587, max=9733 +current rps: 9709 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288113 total samples) + 50.0th: 6 (106330 samples) + 90.0th: 10 (59683 samples) + * 99.0th: 17 (22355 samples) + 99.9th: 28 (1848 samples) + min=1, max=11689 +Request Latencies percentiles (usec) runtime 30 (s) (291158 total samples) + 50.0th: 2436 (0 samples) + 90.0th: 2452 (138078 samples) + * 99.0th: 2476 (4484 samples) + 99.9th: 4664 (2298 samples) + min=2416, max=12084 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9712 (30 samples) + * 50.0th: 9712 (0 samples) + 90.0th: 9712 (0 samples) + min=9587, max=9733 +average rps: 9705 +message_threads 1 +worker_threads 24 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/240.txt b/paper_results/schbench/linux_fifo/240.txt new file mode 100644 index 0000000..a47f600 --- /dev/null +++ b/paper_results/schbench/linux_fifo/240.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97523 total samples) + 50.0th: 3820 (29262 samples) + 90.0th: 17888 (39004 samples) + * 99.0th: 36544 (8768 samples) + 99.9th: 75136 (877 samples) + min=1, max=88682 +Request Latencies percentiles (usec) runtime 10 (s) (97426 total samples) + 50.0th: 9424 (30933 samples) + 90.0th: 21152 (37737 samples) + * 99.0th: 49216 (8048 samples) + 99.9th: 77952 (879 samples) + min=2426, max=93845 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9680 (4 samples) + * 50.0th: 9712 (7 samples) + 90.0th: 9712 (0 samples) + min=9605, max=9712 +current rps: 9702 +Wakeup Latencies percentiles (usec) runtime 20 (s) (195077 total samples) + 50.0th: 3996 (58545 samples) + 90.0th: 18208 (78125 samples) + * 99.0th: 37312 (17424 samples) + 99.9th: 74368 (1751 samples) + min=1, max=96628 +Request Latencies percentiles (usec) runtime 20 (s) (194998 total samples) + 50.0th: 9392 (57406 samples) + 90.0th: 21152 (78929 
samples) + * 99.0th: 53440 (16601 samples) + 99.9th: 77952 (1768 samples) + min=2425, max=99046 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9680 (9 samples) + * 50.0th: 9712 (12 samples) + 90.0th: 9712 (0 samples) + min=9605, max=9712 +current rps: 9693 +Wakeup Latencies percentiles (usec) runtime 30 (s) (293220 total samples) + 50.0th: 4028 (87925 samples) + 90.0th: 18272 (117415 samples) + * 99.0th: 37568 (26244 samples) + 99.9th: 75136 (2637 samples) + min=1, max=100971 +Request Latencies percentiles (usec) runtime 30 (s) (293098 total samples) + 50.0th: 9392 (86420 samples) + 90.0th: 21152 (117526 samples) + * 99.0th: 54464 (25386 samples) + 99.9th: 77952 (2615 samples) + min=2424, max=108225 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (9 samples) + * 50.0th: 9712 (22 samples) + 90.0th: 9712 (0 samples) + min=9605, max=9712 +current rps: 9704 +Wakeup Latencies percentiles (usec) runtime 30 (s) (293329 total samples) + 50.0th: 4036 (88021 samples) + 90.0th: 18272 (117383 samples) + * 99.0th: 37568 (26283 samples) + 99.9th: 75136 (2637 samples) + min=1, max=100971 +Request Latencies percentiles (usec) runtime 30 (s) (293346 total samples) + 50.0th: 9392 (86465 samples) + 90.0th: 21152 (117610 samples) + * 99.0th: 54464 (25440 samples) + 99.9th: 77952 (2615 samples) + min=2424, max=108225 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (9 samples) + * 50.0th: 9712 (22 samples) + 90.0th: 9712 (0 samples) + min=9605, max=9712 +average rps: 9778 +message_threads 1 +worker_threads 240 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/256.txt b/paper_results/schbench/linux_fifo/256.txt new file mode 100644 index 0000000..f479870 --- /dev/null +++ b/paper_results/schbench/linux_fifo/256.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97828 total samples) + 50.0th: 4344 (29362 samples) + 90.0th: 18592 (39157 samples) + * 99.0th: 35136 (8755 samples) + 99.9th: 72832 (885 samples) + min=1, max=87784 +Request Latencies percentiles (usec) runtime 10 (s) (97704 total samples) + 50.0th: 9488 (29309 samples) + 90.0th: 21792 (39065 samples) + * 99.0th: 44480 (8789 samples) + 99.9th: 75648 (875 samples) + min=2420, max=92584 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9680 (5 samples) + * 50.0th: 9712 (6 samples) + 90.0th: 9712 (0 samples) + min=9615, max=9711 +current rps: 9693 +Wakeup Latencies percentiles (usec) runtime 20 (s) (195623 total samples) + 50.0th: 4232 (58742 samples) + 90.0th: 18720 (78296 samples) + * 99.0th: 37312 (17451 samples) + 99.9th: 75648 (1764 samples) + min=1, max=92448 +Request Latencies percentiles (usec) runtime 20 (s) (195520 total samples) + 50.0th: 9488 (58701 samples) + 90.0th: 22432 (77967 samples) + * 99.0th: 54336 (17608 samples) + 99.9th: 77952 (1751 samples) + min=2420, max=95347 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9680 (10 samples) + * 50.0th: 9712 (11 samples) + 90.0th: 9712 (0 samples) + min=9615, max=9711 +current rps: 9692 +Wakeup Latencies percentiles (usec) runtime 30 (s) (293552 total samples) + 50.0th: 4184 (88110 samples) + 90.0th: 18784 (117577 samples) + * 99.0th: 37568 (26176 samples) + 99.9th: 75904 (2641 samples) + min=1, max=92448 +Request Latencies percentiles (usec) runtime 30 (s) (293546 total samples) + 50.0th: 9488 (87996 samples) + 90.0th: 23200 (116860 samples) + * 99.0th: 54720 (26408 samples) + 99.9th: 78208 (2643 samples) + min=2420, 
max=95515 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (14 samples) + * 50.0th: 9712 (17 samples) + 90.0th: 9712 (0 samples) + min=9615, max=9711 +current rps: 9699 +Wakeup Latencies percentiles (usec) runtime 30 (s) (293785 total samples) + 50.0th: 4168 (88109 samples) + 90.0th: 18784 (117756 samples) + * 99.0th: 37568 (26190 samples) + 99.9th: 75904 (2641 samples) + min=1, max=92448 +Request Latencies percentiles (usec) runtime 30 (s) (293806 total samples) + 50.0th: 9488 (88049 samples) + 90.0th: 23200 (116972 samples) + * 99.0th: 54720 (26427 samples) + 99.9th: 78208 (2643 samples) + min=2420, max=95515 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (14 samples) + * 50.0th: 9712 (17 samples) + 90.0th: 9712 (0 samples) + min=9615, max=9711 +average rps: 9794 +message_threads 1 +worker_threads 256 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/32.txt b/paper_results/schbench/linux_fifo/32.txt new file mode 100644 index 0000000..2bf5ce4 --- /dev/null +++ b/paper_results/schbench/linux_fifo/32.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96301 total samples) + 50.0th: 33 (27278 samples) + 90.0th: 193 (38537 samples) + * 99.0th: 1058 (8659 samples) + 99.9th: 1842 (859 samples) + min=1, max=49418 +Request Latencies percentiles (usec) runtime 10 (s) (96401 total samples) + 50.0th: 2732 (27870 samples) + 90.0th: 3348 (38039 samples) + * 99.0th: 4376 (8624 samples) + 99.9th: 30624 (844 samples) + min=2420, max=53097 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9648 (11 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9585, max=9659 +current rps: 9648 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192610 total samples) + 50.0th: 33 (58255 samples) + 90.0th: 192 (76353 samples) + * 99.0th: 1102 (17325 samples) + 99.9th: 1842 (1720 samples) + min=1, max=49418 +Request Latencies percentiles (usec) runtime 20 (s) (192845 total samples) + 50.0th: 2724 (54902 samples) + 90.0th: 3356 (76369 samples) + * 99.0th: 4328 (17345 samples) + 99.9th: 30688 (1697 samples) + min=2420, max=53214 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9648 (21 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9585, max=9659 +current rps: 9638 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288968 total samples) + 50.0th: 33 (87841 samples) + 90.0th: 190 (113810 samples) + * 99.0th: 1102 (25985 samples) + 99.9th: 1842 (2601 samples) + min=1, max=49617 +Request Latencies percentiles (usec) runtime 30 (s) (289341 total samples) + 50.0th: 2724 (82543 samples) + 90.0th: 3356 (114701 samples) + * 99.0th: 4312 (25923 samples) + 99.9th: 5784 (2584 samples) + min=2420, max=53214 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (31 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9585, max=9659 +current rps: 9651 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288972 total samples) + 50.0th: 33 (87841 samples) + 90.0th: 190 (113811 samples) + * 99.0th: 1102 (25987 samples) + 99.9th: 1842 (2601 samples) + min=1, max=49617 +Request Latencies percentiles (usec) runtime 30 (s) (289373 total samples) + 50.0th: 2724 (82557 samples) + 90.0th: 3356 (114712 samples) + * 99.0th: 4312 (25924 samples) + 99.9th: 5784 (2584 samples) + min=2420, max=53214 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (31 samples) + * 50.0th: 9648 (0 
samples) + 90.0th: 9648 (0 samples) + min=9585, max=9659 +average rps: 9646 +message_threads 1 +worker_threads 32 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/4.txt b/paper_results/schbench/linux_fifo/4.txt new file mode 100644 index 0000000..f41b4a9 --- /dev/null +++ b/paper_results/schbench/linux_fifo/4.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (16245 total samples) + 50.0th: 6 (10464 samples) + 90.0th: 7 (2199 samples) + * 99.0th: 8 (122 samples) + 99.9th: 3804 (103 samples) + min=1, max=7310 +Request Latencies percentiles (usec) runtime 10 (s) (16249 total samples) + 50.0th: 2436 (7589 samples) + 90.0th: 2452 (1702 samples) + * 99.0th: 2476 (1046 samples) + 99.9th: 7992 (92 samples) + min=2415, max=8070 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 1622 (3 samples) + * 50.0th: 1626 (7 samples) + 90.0th: 1626 (0 samples) + min=1617, max=1628 +current rps: 1625 +Wakeup Latencies percentiles (usec) runtime 20 (s) (32490 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (4319 samples) + * 99.0th: 8 (351 samples) + 99.9th: 3980 (234 samples) + min=1, max=7310 +Request Latencies percentiles (usec) runtime 20 (s) (32504 total samples) + 50.0th: 2436 (14743 samples) + 90.0th: 2452 (3589 samples) + * 99.0th: 2476 (2340 samples) + 99.9th: 7992 (150 samples) + min=2415, max=8070 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 1626 (19 samples) + * 50.0th: 1626 (0 samples) + 90.0th: 1626 (0 samples) + min=1617, max=1629 +current rps: 1626 +Wakeup Latencies percentiles (usec) runtime 30 (s) (48693 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (6853 samples) + * 99.0th: 8 (561 samples) + 99.9th: 4104 (360 samples) + min=1, max=7310 +Request Latencies percentiles (usec) runtime 30 (s) (48724 total samples) + 50.0th: 2436 (24851 samples) + 90.0th: 2452 (5405 samples) + * 99.0th: 2476 (3931 samples) + 99.9th: 7992 (231 samples) + min=2415, max=8070 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 1622 (10 samples) + * 50.0th: 1626 (19 samples) + 90.0th: 1626 (0 samples) + min=1611, max=1629 +current rps: 1611 +Wakeup Latencies percentiles (usec) runtime 30 (s) (48693 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (6853 samples) + * 99.0th: 8 (561 samples) + 99.9th: 4104 (360 samples) + min=1, max=7310 +Request Latencies percentiles (usec) runtime 30 (s) (48728 total samples) + 50.0th: 2436 (24851 samples) + 90.0th: 2452 (5406 samples) + * 99.0th: 2476 (3932 samples) + 99.9th: 7992 (232 samples) + min=2415, max=8070 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 1622 (10 samples) + * 50.0th: 1626 (19 samples) + 90.0th: 1626 (0 samples) + min=1611, max=1629 +average rps: 1624 +message_threads 1 +worker_threads 4 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/40.txt b/paper_results/schbench/linux_fifo/40.txt new file mode 100644 index 0000000..df94300 --- /dev/null +++ b/paper_results/schbench/linux_fifo/40.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96127 total samples) + 50.0th: 67 (28791 samples) + 90.0th: 735 (38343 samples) + * 99.0th: 2068 (8655 samples) + 99.9th: 3316 (854 samples) + min=1, max=52330 +Request Latencies percentiles (usec) runtime 10 (s) (96172 total samples) + 50.0th: 3068 (28272 samples) + 90.0th: 4328 (38609 samples) + * 99.0th: 6008 (8469 samples) + 99.9th: 52544 (868 samples) + min=2420, max=56349 +RPS percentiles (requests) runtime 10 (s) (11 total 
samples) + 20.0th: 9584 (4 samples) + * 50.0th: 9616 (6 samples) + 90.0th: 9616 (0 samples) + min=9556, max=9632 +current rps: 9626 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192336 total samples) + 50.0th: 68 (57564 samples) + 90.0th: 733 (75958 samples) + * 99.0th: 2052 (17336 samples) + 99.9th: 3236 (1692 samples) + min=1, max=52330 +Request Latencies percentiles (usec) runtime 20 (s) (192450 total samples) + 50.0th: 3076 (57377 samples) + 90.0th: 4280 (76482 samples) + * 99.0th: 5960 (17196 samples) + 99.9th: 52800 (1723 samples) + min=2419, max=56410 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9616 (20 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9556, max=9632 +current rps: 9624 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288542 total samples) + 50.0th: 68 (86252 samples) + 90.0th: 739 (113885 samples) + * 99.0th: 2046 (25955 samples) + 99.9th: 3228 (2588 samples) + min=1, max=52330 +Request Latencies percentiles (usec) runtime 30 (s) (288725 total samples) + 50.0th: 3076 (85996 samples) + 90.0th: 4264 (114692 samples) + * 99.0th: 5944 (25952 samples) + 99.9th: 52800 (2584 samples) + min=2419, max=56410 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (30 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9556, max=9632 +current rps: 9626 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288551 total samples) + 50.0th: 68 (86253 samples) + 90.0th: 739 (113887 samples) + * 99.0th: 2046 (25959 samples) + 99.9th: 3228 (2588 samples) + min=1, max=52330 +Request Latencies percentiles (usec) runtime 30 (s) (288768 total samples) + 50.0th: 3076 (86013 samples) + 90.0th: 4264 (114705 samples) + * 99.0th: 5944 (25956 samples) + 99.9th: 52800 (2586 samples) + min=2419, max=56410 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (30 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9556, max=9632 +average rps: 9626 +message_threads 1 +worker_threads 40 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/48.txt b/paper_results/schbench/linux_fifo/48.txt new file mode 100644 index 0000000..949fe62 --- /dev/null +++ b/paper_results/schbench/linux_fifo/48.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (95997 total samples) + 50.0th: 110 (28792 samples) + 90.0th: 1378 (38285 samples) + * 99.0th: 3036 (8638 samples) + 99.9th: 6616 (861 samples) + min=1, max=54747 +Request Latencies percentiles (usec) runtime 10 (s) (96001 total samples) + 50.0th: 3364 (28448 samples) + 90.0th: 4840 (38749 samples) + * 99.0th: 7880 (8217 samples) + 99.9th: 53824 (859 samples) + min=2423, max=57538 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9584 (6 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9616 (5 samples) + min=9532, max=9611 +current rps: 9599 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192039 total samples) + 50.0th: 109 (57358 samples) + 90.0th: 1370 (76505 samples) + * 99.0th: 3060 (17242 samples) + 99.9th: 6648 (1722 samples) + min=1, max=54747 +Request Latencies percentiles (usec) runtime 20 (s) (192098 total samples) + 50.0th: 3356 (57678 samples) + 90.0th: 4840 (77533 samples) + * 99.0th: 7960 (16410 samples) + 99.9th: 54080 (1732 samples) + min=2422, max=57538 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9584 (7 samples) + * 50.0th: 9616 (14 samples) + 90.0th: 9616 (0 samples) + min=9532, max=9616 +current rps: 9610 
+Wakeup Latencies percentiles (usec) runtime 30 (s) (288051 total samples) + 50.0th: 109 (85844 samples) + 90.0th: 1366 (115087 samples) + * 99.0th: 3108 (25797 samples) + 99.9th: 6568 (2593 samples) + min=1, max=56082 +Request Latencies percentiles (usec) runtime 30 (s) (288139 total samples) + 50.0th: 3356 (86306 samples) + 90.0th: 4824 (115267 samples) + * 99.0th: 8040 (25715 samples) + 99.9th: 54080 (2590 samples) + min=2422, max=60475 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9584 (9 samples) + * 50.0th: 9616 (22 samples) + 90.0th: 9616 (0 samples) + min=9532, max=9616 +current rps: 9602 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288059 total samples) + 50.0th: 109 (85845 samples) + 90.0th: 1366 (115087 samples) + * 99.0th: 3108 (25799 samples) + 99.9th: 6568 (2593 samples) + min=1, max=56082 +Request Latencies percentiles (usec) runtime 30 (s) (288187 total samples) + 50.0th: 3356 (86312 samples) + 90.0th: 4824 (115290 samples) + * 99.0th: 8040 (25720 samples) + 99.9th: 54080 (2590 samples) + min=2422, max=60475 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9584 (9 samples) + * 50.0th: 9616 (22 samples) + 90.0th: 9616 (0 samples) + min=9532, max=9616 +average rps: 9606 +message_threads 1 +worker_threads 48 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/64.txt b/paper_results/schbench/linux_fifo/64.txt new file mode 100644 index 0000000..ce1d111 --- /dev/null +++ b/paper_results/schbench/linux_fifo/64.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (95993 total samples) + 50.0th: 342 (28650 samples) + 90.0th: 2340 (38409 samples) + * 99.0th: 6776 (8614 samples) + 99.9th: 50496 (862 samples) + min=1, max=59153 +Request Latencies percentiles (usec) runtime 10 (s) (95972 total samples) + 50.0th: 3908 (28661 samples) + 90.0th: 6680 (38305 samples) + * 99.0th: 12336 (8608 samples) + 99.9th: 55616 (863 samples) + min=2421, max=64233 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9584 (10 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9584 (0 samples) + min=9499, max=9600 +current rps: 9597 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192104 total samples) + 50.0th: 340 (57130 samples) + 90.0th: 2340 (76934 samples) + * 99.0th: 6728 (17155 samples) + 99.9th: 31776 (1724 samples) + min=1, max=60148 +Request Latencies percentiles (usec) runtime 20 (s) (192119 total samples) + 50.0th: 3908 (57802 samples) + 90.0th: 6648 (76652 samples) + * 99.0th: 12080 (17261 samples) + 99.9th: 55872 (1736 samples) + min=2421, max=65103 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9584 (17 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9616 (4 samples) + min=9499, max=9611 +current rps: 9599 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288312 total samples) + 50.0th: 338 (85755 samples) + 90.0th: 2332 (115386 samples) + * 99.0th: 6696 (25893 samples) + 99.9th: 50496 (2582 samples) + min=1, max=64327 +Request Latencies percentiles (usec) runtime 30 (s) (288367 total samples) + 50.0th: 3900 (86641 samples) + 90.0th: 6632 (115311 samples) + * 99.0th: 12048 (25903 samples) + 99.9th: 55744 (2541 samples) + min=2419, max=68992 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9584 (22 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9616 (9 samples) + min=9499, max=9611 +current rps: 9609 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288341 total samples) + 50.0th: 338 (85764 samples) + 90.0th: 2332 
(115387 samples) + * 99.0th: 6696 (25900 samples) + 99.9th: 50496 (2584 samples) + min=1, max=64327 +Request Latencies percentiles (usec) runtime 30 (s) (288433 total samples) + 50.0th: 3900 (86661 samples) + 90.0th: 6632 (115338 samples) + * 99.0th: 12048 (25908 samples) + 99.9th: 55744 (2541 samples) + min=2419, max=68992 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9584 (22 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9616 (9 samples) + min=9499, max=9611 +average rps: 9614 +message_threads 1 +worker_threads 64 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/72.txt b/paper_results/schbench/linux_fifo/72.txt new file mode 100644 index 0000000..e3ce0f6 --- /dev/null +++ b/paper_results/schbench/linux_fifo/72.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96001 total samples) + 50.0th: 479 (28706 samples) + 90.0th: 2964 (38415 samples) + * 99.0th: 8400 (8633 samples) + 99.9th: 50496 (857 samples) + min=1, max=66741 +Request Latencies percentiles (usec) runtime 10 (s) (95977 total samples) + 50.0th: 4280 (28816 samples) + 90.0th: 7160 (38749 samples) + * 99.0th: 14352 (8138 samples) + 99.9th: 56896 (859 samples) + min=2420, max=69588 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9584 (11 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9584 (0 samples) + min=9526, max=9599 +current rps: 9594 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192163 total samples) + 50.0th: 500 (57274 samples) + 90.0th: 3060 (76899 samples) + * 99.0th: 8400 (17260 samples) + 99.9th: 50624 (1733 samples) + min=1, max=66741 +Request Latencies percentiles (usec) runtime 20 (s) (192175 total samples) + 50.0th: 4280 (57778 samples) + 90.0th: 7144 (76623 samples) + * 99.0th: 14352 (17200 samples) + 99.9th: 56896 (1701 samples) + min=2420, max=69588 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9584 (20 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9584 (0 samples) + min=9526, max=9600 +current rps: 9599 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288177 total samples) + 50.0th: 501 (86002 samples) + 90.0th: 3060 (115216 samples) + * 99.0th: 8368 (25898 samples) + 99.9th: 51008 (2592 samples) + min=1, max=67328 +Request Latencies percentiles (usec) runtime 30 (s) (288229 total samples) + 50.0th: 4280 (86334 samples) + 90.0th: 7144 (115370 samples) + * 99.0th: 14320 (25453 samples) + 99.9th: 56896 (2598 samples) + min=2420, max=69618 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9584 (29 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9584 (0 samples) + min=9526, max=9600 +current rps: 9587 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288217 total samples) + 50.0th: 501 (86012 samples) + 90.0th: 3060 (115230 samples) + * 99.0th: 8368 (25909 samples) + 99.9th: 51008 (2592 samples) + min=1, max=67328 +Request Latencies percentiles (usec) runtime 30 (s) (288303 total samples) + 50.0th: 4280 (86355 samples) + 90.0th: 7144 (115403 samples) + * 99.0th: 14320 (25455 samples) + 99.9th: 56896 (2598 samples) + min=2420, max=69618 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9584 (29 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9584 (0 samples) + min=9526, max=9600 +average rps: 9610 +message_threads 1 +worker_threads 72 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/8.txt b/paper_results/schbench/linux_fifo/8.txt new file mode 100644 index 0000000..549efa9 --- /dev/null +++ 
b/paper_results/schbench/linux_fifo/8.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (32530 total samples) + 50.0th: 6 (19854 samples) + 90.0th: 7 (4154 samples) + * 99.0th: 8 (723 samples) + 99.9th: 3764 (245 samples) + min=1, max=6636 +Request Latencies percentiles (usec) runtime 10 (s) (32578 total samples) + 50.0th: 2428 (0 samples) + 90.0th: 2436 (13508 samples) + * 99.0th: 2460 (1463 samples) + 99.9th: 7992 (246 samples) + min=2416, max=8070 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 3252 (4 samples) + * 50.0th: 3260 (6 samples) + 90.0th: 3260 (0 samples) + min=3251, max=3272 +current rps: 3251 +Wakeup Latencies percentiles (usec) runtime 20 (s) (65038 total samples) + 50.0th: 6 (40641 samples) + 90.0th: 7 (9194 samples) + * 99.0th: 8 (1468 samples) + 99.9th: 4028 (494 samples) + min=1, max=7786 +Request Latencies percentiles (usec) runtime 20 (s) (65152 total samples) + 50.0th: 2428 (0 samples) + 90.0th: 2436 (28607 samples) + * 99.0th: 2452 (2265 samples) + 99.9th: 4536 (532 samples) + min=2416, max=8070 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 3252 (7 samples) + * 50.0th: 3260 (13 samples) + 90.0th: 3260 (0 samples) + min=3251, max=3272 +current rps: 3260 +Wakeup Latencies percentiles (usec) runtime 30 (s) (97553 total samples) + 50.0th: 6 (60669 samples) + 90.0th: 7 (13672 samples) + * 99.0th: 8 (2470 samples) + 99.9th: 4152 (804 samples) + min=1, max=7786 +Request Latencies percentiles (usec) runtime 30 (s) (97726 total samples) + 50.0th: 2428 (0 samples) + 90.0th: 2436 (42949 samples) + * 99.0th: 2452 (2914 samples) + 99.9th: 4280 (651 samples) + min=2416, max=8088 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 3252 (10 samples) + * 50.0th: 3260 (20 samples) + 90.0th: 3260 (0 samples) + min=3251, max=3272 +current rps: 3259 +Wakeup Latencies percentiles (usec) runtime 30 (s) (97553 total samples) + 50.0th: 6 (60669 samples) + 90.0th: 7 (13672 samples) + * 99.0th: 8 (2470 samples) + 99.9th: 4152 (804 samples) + min=1, max=7786 +Request Latencies percentiles (usec) runtime 30 (s) (97734 total samples) + 50.0th: 2428 (0 samples) + 90.0th: 2436 (42951 samples) + * 99.0th: 2452 (2915 samples) + 99.9th: 4280 (651 samples) + min=2416, max=8088 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 3252 (10 samples) + * 50.0th: 3260 (20 samples) + 90.0th: 3260 (0 samples) + min=3251, max=3272 +average rps: 3258 +message_threads 1 +worker_threads 8 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/80.txt b/paper_results/schbench/linux_fifo/80.txt new file mode 100644 index 0000000..692ad53 --- /dev/null +++ b/paper_results/schbench/linux_fifo/80.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (95754 total samples) + 50.0th: 699 (28680 samples) + 90.0th: 3988 (38307 samples) + * 99.0th: 9648 (8612 samples) + 99.9th: 52032 (858 samples) + min=1, max=69928 +Request Latencies percentiles (usec) runtime 10 (s) (95718 total samples) + 50.0th: 4712 (29799 samples) + 90.0th: 7480 (37203 samples) + * 99.0th: 15568 (8568 samples) + 99.9th: 57152 (863 samples) + min=2425, max=71725 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9552 (5 samples) + * 50.0th: 9584 (6 samples) + 90.0th: 9584 (0 samples) + min=9498, max=9579 +current rps: 9570 +Wakeup Latencies percentiles (usec) runtime 20 (s) (191630 total samples) + 50.0th: 673 (57348 samples) + 90.0th: 3924 (76651 samples) + * 99.0th: 9584 (17245 
samples) + 99.9th: 52032 (1725 samples) + min=1, max=69928 +Request Latencies percentiles (usec) runtime 20 (s) (191613 total samples) + 50.0th: 4712 (60067 samples) + 90.0th: 7512 (73956 samples) + * 99.0th: 16016 (17169 samples) + 99.9th: 57152 (1735 samples) + min=2424, max=71725 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9552 (10 samples) + * 50.0th: 9584 (11 samples) + 90.0th: 9584 (0 samples) + min=9498, max=9582 +current rps: 9565 +Wakeup Latencies percentiles (usec) runtime 30 (s) (287455 total samples) + 50.0th: 677 (86170 samples) + 90.0th: 3956 (115027 samples) + * 99.0th: 9680 (25814 samples) + 99.9th: 52288 (2572 samples) + min=1, max=69928 +Request Latencies percentiles (usec) runtime 30 (s) (287449 total samples) + 50.0th: 4712 (90306 samples) + 90.0th: 7496 (110646 samples) + * 99.0th: 16208 (25791 samples) + 99.9th: 57024 (2602 samples) + min=2423, max=71725 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9552 (15 samples) + * 50.0th: 9584 (16 samples) + 90.0th: 9584 (0 samples) + min=9498, max=9582 +current rps: 9575 +Wakeup Latencies percentiles (usec) runtime 30 (s) (287483 total samples) + 50.0th: 677 (86176 samples) + 90.0th: 3956 (115040 samples) + * 99.0th: 9680 (25816 samples) + 99.9th: 52288 (2572 samples) + min=1, max=69928 +Request Latencies percentiles (usec) runtime 30 (s) (287530 total samples) + 50.0th: 4712 (90332 samples) + 90.0th: 7496 (110674 samples) + * 99.0th: 16208 (25799 samples) + 99.9th: 57024 (2602 samples) + min=2423, max=71725 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9552 (15 samples) + * 50.0th: 9584 (16 samples) + 90.0th: 9584 (0 samples) + min=9498, max=9582 +average rps: 9584 +message_threads 1 +worker_threads 80 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/96.txt b/paper_results/schbench/linux_fifo/96.txt new file mode 100644 index 0000000..f147b24 --- /dev/null +++ b/paper_results/schbench/linux_fifo/96.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96309 total samples) + 50.0th: 1038 (28878 samples) + 90.0th: 5304 (38514 samples) + * 99.0th: 12784 (8645 samples) + 99.9th: 52672 (862 samples) + min=1, max=69289 +Request Latencies percentiles (usec) runtime 10 (s) (96270 total samples) + 50.0th: 4744 (31579 samples) + 90.0th: 9456 (36349 samples) + * 99.0th: 18976 (8118 samples) + 99.9th: 58432 (843 samples) + min=2421, max=71868 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9584 (3 samples) + * 50.0th: 9616 (8 samples) + 90.0th: 9616 (0 samples) + min=9539, max=9623 +current rps: 9620 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192648 total samples) + 50.0th: 1017 (57758 samples) + 90.0th: 5240 (77017 samples) + * 99.0th: 12720 (17317 samples) + 99.9th: 52672 (1728 samples) + min=1, max=71122 +Request Latencies percentiles (usec) runtime 20 (s) (192653 total samples) + 50.0th: 4744 (63931 samples) + 90.0th: 9456 (72146 samples) + * 99.0th: 19040 (16137 samples) + 99.9th: 59200 (1710 samples) + min=2421, max=73998 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9616 (20 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9539, max=9632 +current rps: 9614 +Wakeup Latencies percentiles (usec) runtime 30 (s) (289379 total samples) + 50.0th: 1013 (86823 samples) + 90.0th: 5224 (115738 samples) + * 99.0th: 12816 (26032 samples) + 99.9th: 53440 (2598 samples) + min=1, max=75559 +Request Latencies percentiles (usec) runtime 30 
(s) (289380 total samples) + 50.0th: 4744 (96230 samples) + 90.0th: 9456 (108047 samples) + * 99.0th: 19040 (24261 samples) + 99.9th: 59584 (2601 samples) + min=2421, max=78624 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (30 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9539, max=9632 +current rps: 9618 +Wakeup Latencies percentiles (usec) runtime 30 (s) (289418 total samples) + 50.0th: 1013 (86825 samples) + 90.0th: 5224 (115756 samples) + * 99.0th: 12816 (26050 samples) + 99.9th: 53440 (2598 samples) + min=1, max=75559 +Request Latencies percentiles (usec) runtime 30 (s) (289478 total samples) + 50.0th: 4744 (96247 samples) + 90.0th: 9456 (108099 samples) + * 99.0th: 19040 (24271 samples) + 99.9th: 59584 (2601 samples) + min=2421, max=78624 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (30 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9539, max=9632 +average rps: 9649 +message_threads 1 +worker_threads 96 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_fifo/all.csv b/paper_results/schbench/linux_fifo/all.csv new file mode 100644 index 0000000..8a1ad61 --- /dev/null +++ b/paper_results/schbench/linux_fifo/all.csv @@ -0,0 +1,22 @@ +cores,lat99,rps50 +4,8,1626,2476 +8,8,3260,2452 +16,9,6504,2452 +24,17,9712,2476 +32,1102,9648,4312 +40,2046,9616,5944 +48,3108,9616,8040 +64,6696,9584,12048 +72,8368,9584,14320 +80,9680,9584,16208 +96,12816,9616,19040 +112,15312,9648,21280 +128,18272,9648,23712 +144,21152,9648,27104 +160,23200,9648,28768 +176,25248,9648,32832 +192,27616,9680,33472 +208,30176,9680,40000 +224,33216,9680,44608 +240,37568,9712,54464 +256,37568,9712,54720 diff --git a/paper_results/schbench/linux_rr/112.txt b/paper_results/schbench/linux_rr/112.txt new file mode 100644 index 0000000..7e7298c --- /dev/null +++ b/paper_results/schbench/linux_rr/112.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96405 total samples) + 50.0th: 1390 (28940 samples) + 90.0th: 6776 (38563 samples) + * 99.0th: 14992 (8637 samples) + 99.9th: 57920 (866 samples) + min=1, max=88003 +Request Latencies percentiles (usec) runtime 10 (s) (96350 total samples) + 50.0th: 4776 (29471 samples) + 90.0th: 10256 (37951 samples) + * 99.0th: 21344 (8689 samples) + 99.9th: 60864 (834 samples) + min=2425, max=95175 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9616 (11 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9559, max=9621 +current rps: 9621 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192850 total samples) + 50.0th: 1390 (57945 samples) + 90.0th: 6728 (77088 samples) + * 99.0th: 14928 (17324 samples) + 99.9th: 56512 (1731 samples) + min=1, max=88003 +Request Latencies percentiles (usec) runtime 20 (s) (192825 total samples) + 50.0th: 4776 (59137 samples) + 90.0th: 10224 (75861 samples) + * 99.0th: 21280 (17348 samples) + 99.9th: 60352 (1662 samples) + min=2422, max=95175 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9616 (21 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9559, max=9628 +current rps: 9619 +Wakeup Latencies percentiles (usec) runtime 30 (s) (289332 total samples) + 50.0th: 1382 (86849 samples) + 90.0th: 6744 (115719 samples) + * 99.0th: 14960 (25979 samples) + 99.9th: 56640 (2605 samples) + min=1, max=88003 +Request Latencies percentiles (usec) runtime 30 (s) (289316 total samples) + 50.0th: 4776 (88677 samples) + 90.0th: 10192 
(113807 samples) + * 99.0th: 21280 (26086 samples) + 99.9th: 60864 (2548 samples) + min=2421, max=95175 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (31 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9559, max=9628 +current rps: 9621 +Wakeup Latencies percentiles (usec) runtime 30 (s) (289393 total samples) + 50.0th: 1382 (86858 samples) + 90.0th: 6744 (115750 samples) + * 99.0th: 14960 (25991 samples) + 99.9th: 56640 (2605 samples) + min=1, max=88003 +Request Latencies percentiles (usec) runtime 30 (s) (289434 total samples) + 50.0th: 4776 (88702 samples) + 90.0th: 10192 (113867 samples) + * 99.0th: 21280 (26093 samples) + 99.9th: 60864 (2548 samples) + min=2421, max=95175 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (31 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9559, max=9628 +average rps: 9648 +message_threads 1 +worker_threads 112 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/128.txt b/paper_results/schbench/linux_rr/128.txt new file mode 100644 index 0000000..d7d3348 --- /dev/null +++ b/paper_results/schbench/linux_rr/128.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96723 total samples) + 50.0th: 1646 (29013 samples) + 90.0th: 8104 (38685 samples) + * 99.0th: 17952 (8690 samples) + 99.9th: 58048 (871 samples) + min=1, max=79135 +Request Latencies percentiles (usec) runtime 10 (s) (96684 total samples) + 50.0th: 5560 (29051 samples) + 90.0th: 11792 (39145 samples) + * 99.0th: 23584 (8208 samples) + 99.9th: 64192 (845 samples) + min=2421, max=82636 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9584 (3 samples) + * 50.0th: 9648 (8 samples) + 90.0th: 9648 (0 samples) + min=9580, max=9650 +current rps: 9647 +Wakeup Latencies percentiles (usec) runtime 20 (s) (193826 total samples) + 50.0th: 1646 (58229 samples) + 90.0th: 8088 (77479 samples) + * 99.0th: 17568 (17407 samples) + 99.9th: 58176 (1741 samples) + min=1, max=86787 +Request Latencies percentiles (usec) runtime 20 (s) (193774 total samples) + 50.0th: 5528 (58177 samples) + 90.0th: 11792 (78490 samples) + * 99.0th: 23584 (16412 samples) + 99.9th: 63552 (1681 samples) + min=2421, max=89530 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9616 (5 samples) + * 50.0th: 9648 (16 samples) + 90.0th: 9648 (0 samples) + min=9580, max=9657 +current rps: 9657 +Wakeup Latencies percentiles (usec) runtime 30 (s) (290662 total samples) + 50.0th: 1646 (87137 samples) + 90.0th: 8120 (116303 samples) + * 99.0th: 18016 (26109 samples) + 99.9th: 59072 (2609 samples) + min=1, max=86787 +Request Latencies percentiles (usec) runtime 30 (s) (290651 total samples) + 50.0th: 5496 (87038 samples) + 90.0th: 11792 (117491 samples) + * 99.0th: 23648 (25039 samples) + 99.9th: 63680 (2497 samples) + min=2421, max=89530 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (31 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9580, max=9661 +current rps: 9647 +Wakeup Latencies percentiles (usec) runtime 30 (s) (290744 total samples) + 50.0th: 1646 (87164 samples) + 90.0th: 8120 (116340 samples) + * 99.0th: 18016 (26118 samples) + 99.9th: 59072 (2609 samples) + min=1, max=86787 +Request Latencies percentiles (usec) runtime 30 (s) (290784 total samples) + 50.0th: 5496 (87081 samples) + 90.0th: 11792 (117552 samples) + * 99.0th: 23648 (25052 samples) + 99.9th: 63680 (2497 samples) + min=2421, max=89530 
+RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (31 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9648 (0 samples) + min=9580, max=9661 +average rps: 9693 +message_threads 1 +worker_threads 128 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/144.txt b/paper_results/schbench/linux_rr/144.txt new file mode 100644 index 0000000..498547d --- /dev/null +++ b/paper_results/schbench/linux_rr/144.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97118 total samples) + 50.0th: 1906 (29144 samples) + 90.0th: 9328 (38884 samples) + * 99.0th: 19232 (8701 samples) + 99.9th: 57920 (867 samples) + min=1, max=77285 +Request Latencies percentiles (usec) runtime 10 (s) (97060 total samples) + 50.0th: 7016 (29378 samples) + 90.0th: 13264 (38504 samples) + * 99.0th: 25952 (8756 samples) + 99.9th: 64448 (852 samples) + min=2421, max=79524 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9616 (3 samples) + * 50.0th: 9648 (4 samples) + 90.0th: 9680 (4 samples) + min=9605, max=9669 +current rps: 9658 +Wakeup Latencies percentiles (usec) runtime 20 (s) (194347 total samples) + 50.0th: 1910 (58291 samples) + 90.0th: 9360 (77778 samples) + * 99.0th: 19616 (17414 samples) + 99.9th: 60224 (1752 samples) + min=1, max=77285 +Request Latencies percentiles (usec) runtime 20 (s) (194276 total samples) + 50.0th: 7016 (58998 samples) + 90.0th: 13296 (76869 samples) + * 99.0th: 26272 (17435 samples) + 99.9th: 64960 (1749 samples) + min=2421, max=79524 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9648 (14 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9680 (7 samples) + min=9605, max=9673 +current rps: 9655 +Wakeup Latencies percentiles (usec) runtime 30 (s) (291457 total samples) + 50.0th: 1902 (87496 samples) + 90.0th: 9360 (116706 samples) + * 99.0th: 19680 (26023 samples) + 99.9th: 60608 (2598 samples) + min=1, max=78326 +Request Latencies percentiles (usec) runtime 30 (s) (291422 total samples) + 50.0th: 7016 (88838 samples) + 90.0th: 13264 (115122 samples) + * 99.0th: 27744 (26175 samples) + 99.9th: 65664 (2635 samples) + min=2421, max=81494 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (21 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9680 (10 samples) + min=9209, max=9673 +current rps: 9673 +Wakeup Latencies percentiles (usec) runtime 30 (s) (291547 total samples) + 50.0th: 1902 (87587 samples) + 90.0th: 9360 (116726 samples) + * 99.0th: 19680 (26034 samples) + 99.9th: 60608 (2598 samples) + min=1, max=78326 +Request Latencies percentiles (usec) runtime 30 (s) (291576 total samples) + 50.0th: 7016 (88885 samples) + 90.0th: 13264 (115181 samples) + * 99.0th: 27744 (26190 samples) + 99.9th: 65664 (2635 samples) + min=2421, max=81494 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (21 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9680 (10 samples) + min=9209, max=9673 +average rps: 9719 +message_threads 1 +worker_threads 144 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/16.txt b/paper_results/schbench/linux_rr/16.txt new file mode 100644 index 0000000..b357149 --- /dev/null +++ b/paper_results/schbench/linux_rr/16.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (64868 total samples) + 50.0th: 6 (34400 samples) + 90.0th: 7 (9719 samples) + * 99.0th: 10 (3600 samples) + 99.9th: 4504 (402 samples) + min=1, max=7751 +Request Latencies percentiles (usec) runtime 10 (s) (65094 
total samples) + 50.0th: 2436 (31370 samples) + 90.0th: 2436 (0 samples) + * 99.0th: 2460 (5099 samples) + 99.9th: 4648 (432 samples) + min=2415, max=9229 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 6504 (8 samples) + * 50.0th: 6504 (0 samples) + 90.0th: 6520 (3 samples) + min=6498, max=6515 +current rps: 6510 +Wakeup Latencies percentiles (usec) runtime 20 (s) (129681 total samples) + 50.0th: 6 (68450 samples) + 90.0th: 7 (22648 samples) + * 99.0th: 10 (8707 samples) + 99.9th: 4616 (698 samples) + min=1, max=7751 +Request Latencies percentiles (usec) runtime 20 (s) (130190 total samples) + 50.0th: 2436 (68620 samples) + 90.0th: 2436 (0 samples) + * 99.0th: 2452 (10588 samples) + 99.9th: 4136 (1084 samples) + min=2415, max=9229 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 6504 (15 samples) + * 50.0th: 6504 (0 samples) + 90.0th: 6520 (6 samples) + min=6498, max=6516 +current rps: 6508 +Wakeup Latencies percentiles (usec) runtime 30 (s) (194459 total samples) + 50.0th: 6 (99844 samples) + 90.0th: 7 (34164 samples) + * 99.0th: 10 (15392 samples) + 99.9th: 4712 (1046 samples) + min=1, max=7899 +Request Latencies percentiles (usec) runtime 30 (s) (195282 total samples) + 50.0th: 2436 (106858 samples) + 90.0th: 2436 (0 samples) + * 99.0th: 2452 (15765 samples) + 99.9th: 2732 (1303 samples) + min=2415, max=9229 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 6504 (23 samples) + * 50.0th: 6504 (0 samples) + 90.0th: 6520 (8 samples) + min=6498, max=6516 +current rps: 6504 +Wakeup Latencies percentiles (usec) runtime 30 (s) (194461 total samples) + 50.0th: 6 (99845 samples) + 90.0th: 7 (34164 samples) + * 99.0th: 10 (15393 samples) + 99.9th: 4712 (1046 samples) + min=1, max=7899 +Request Latencies percentiles (usec) runtime 30 (s) (195299 total samples) + 50.0th: 2436 (106863 samples) + 90.0th: 2436 (0 samples) + * 99.0th: 2452 (15773 samples) + 99.9th: 2732 (1305 samples) + min=2415, max=9229 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 6504 (23 samples) + * 50.0th: 6504 (0 samples) + 90.0th: 6520 (8 samples) + min=6498, max=6516 +average rps: 6510 +message_threads 1 +worker_threads 16 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/160.txt b/paper_results/schbench/linux_rr/160.txt new file mode 100644 index 0000000..02f266b --- /dev/null +++ b/paper_results/schbench/linux_rr/160.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97115 total samples) + 50.0th: 2084 (29225 samples) + 90.0th: 10608 (38742 samples) + * 99.0th: 21408 (8729 samples) + 99.9th: 63680 (880 samples) + min=1, max=78299 +Request Latencies percentiles (usec) runtime 10 (s) (97052 total samples) + 50.0th: 7048 (28560 samples) + 90.0th: 14096 (38185 samples) + * 99.0th: 28192 (8605 samples) + 99.9th: 66688 (847 samples) + min=2423, max=81777 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9648 (3 samples) + * 50.0th: 9680 (8 samples) + 90.0th: 9680 (0 samples) + min=9607, max=9688 +current rps: 9685 +Wakeup Latencies percentiles (usec) runtime 20 (s) (194316 total samples) + 50.0th: 2124 (58411 samples) + 90.0th: 10832 (77709 samples) + * 99.0th: 22368 (17377 samples) + 99.9th: 63680 (1747 samples) + min=1, max=87495 +Request Latencies percentiles (usec) runtime 20 (s) (194307 total samples) + 50.0th: 7048 (57291 samples) + 90.0th: 14128 (76889 samples) + * 99.0th: 28256 (16322 samples) + 99.9th: 67456 (1752 samples) + min=2419, max=92708 +RPS 
percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9680 (21 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9607, max=9692 +current rps: 9682 +Wakeup Latencies percentiles (usec) runtime 30 (s) (291736 total samples) + 50.0th: 2132 (87648 samples) + 90.0th: 10896 (116530 samples) + * 99.0th: 22496 (26191 samples) + 99.9th: 64064 (2629 samples) + min=1, max=87495 +Request Latencies percentiles (usec) runtime 30 (s) (291693 total samples) + 50.0th: 7048 (85641 samples) + 90.0th: 14128 (114781 samples) + * 99.0th: 28256 (25016 samples) + 99.9th: 67200 (2560 samples) + min=2419, max=100454 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (31 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9607, max=9695 +current rps: 9671 +Wakeup Latencies percentiles (usec) runtime 30 (s) (291834 total samples) + 50.0th: 2132 (87666 samples) + 90.0th: 10896 (116576 samples) + * 99.0th: 22496 (26212 samples) + 99.9th: 64064 (2629 samples) + min=1, max=87495 +Request Latencies percentiles (usec) runtime 30 (s) (291860 total samples) + 50.0th: 7048 (85680 samples) + 90.0th: 14128 (114845 samples) + * 99.0th: 28256 (25040 samples) + 99.9th: 67200 (2560 samples) + min=2419, max=100454 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (31 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9607, max=9695 +average rps: 9729 +message_threads 1 +worker_threads 160 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/176.txt b/paper_results/schbench/linux_rr/176.txt new file mode 100644 index 0000000..86458e8 --- /dev/null +++ b/paper_results/schbench/linux_rr/176.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97418 total samples) + 50.0th: 2276 (29332 samples) + 90.0th: 11920 (38894 samples) + * 99.0th: 24416 (8718 samples) + 99.9th: 64576 (878 samples) + min=1, max=80041 +Request Latencies percentiles (usec) runtime 10 (s) (97320 total samples) + 50.0th: 7096 (28667 samples) + 90.0th: 16304 (38314 samples) + * 99.0th: 31264 (8756 samples) + 99.9th: 69760 (878 samples) + min=2423, max=95385 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9680 (11 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9599, max=9683 +current rps: 9678 +Wakeup Latencies percentiles (usec) runtime 20 (s) (194561 total samples) + 50.0th: 2260 (58430 samples) + 90.0th: 11856 (77839 samples) + * 99.0th: 23968 (17390 samples) + 99.9th: 64192 (1752 samples) + min=1, max=80330 +Request Latencies percentiles (usec) runtime 20 (s) (194445 total samples) + 50.0th: 7096 (57239 samples) + 90.0th: 16240 (75903 samples) + * 99.0th: 30624 (17515 samples) + 99.9th: 68992 (1730 samples) + min=2423, max=95385 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9680 (21 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9599, max=9683 +current rps: 9677 +Wakeup Latencies percentiles (usec) runtime 30 (s) (291565 total samples) + 50.0th: 2292 (87922 samples) + 90.0th: 11920 (116353 samples) + * 99.0th: 24032 (26044 samples) + 99.9th: 64576 (2631 samples) + min=1, max=92186 +Request Latencies percentiles (usec) runtime 30 (s) (291532 total samples) + 50.0th: 7096 (86054 samples) + 90.0th: 16176 (113603 samples) + * 99.0th: 30560 (26258 samples) + 99.9th: 69504 (2601 samples) + min=2422, max=95709 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (31 samples) + * 50.0th: 
9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9599, max=9683 +current rps: 9663 +Wakeup Latencies percentiles (usec) runtime 30 (s) (291688 total samples) + 50.0th: 2284 (87484 samples) + 90.0th: 11920 (116872 samples) + * 99.0th: 24032 (26057 samples) + 99.9th: 64576 (2634 samples) + min=1, max=92186 +Request Latencies percentiles (usec) runtime 30 (s) (291713 total samples) + 50.0th: 7096 (86100 samples) + 90.0th: 16176 (113683 samples) + * 99.0th: 30560 (26274 samples) + 99.9th: 69504 (2603 samples) + min=2422, max=95709 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (31 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9599, max=9683 +average rps: 9724 +message_threads 1 +worker_threads 176 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/192.txt b/paper_results/schbench/linux_rr/192.txt new file mode 100644 index 0000000..f7c5fd2 --- /dev/null +++ b/paper_results/schbench/linux_rr/192.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97486 total samples) + 50.0th: 2500 (29281 samples) + 90.0th: 13360 (38947 samples) + * 99.0th: 26144 (8786 samples) + 99.9th: 67456 (863 samples) + min=1, max=82040 +Request Latencies percentiles (usec) runtime 10 (s) (97391 total samples) + 50.0th: 7640 (27253 samples) + 90.0th: 16544 (39292 samples) + * 99.0th: 36544 (8426 samples) + 99.9th: 70784 (880 samples) + min=2422, max=95939 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9648 (6 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9680 (5 samples) + min=9578, max=9678 +current rps: 9667 +Wakeup Latencies percentiles (usec) runtime 20 (s) (194720 total samples) + 50.0th: 2580 (58411 samples) + 90.0th: 13584 (77873 samples) + * 99.0th: 26720 (17517 samples) + 99.9th: 66176 (1753 samples) + min=1, max=82040 +Request Latencies percentiles (usec) runtime 20 (s) (194670 total samples) + 50.0th: 7576 (54143 samples) + 90.0th: 16544 (78144 samples) + * 99.0th: 35136 (17249 samples) + 99.9th: 69760 (1747 samples) + min=2422, max=95939 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9648 (11 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9680 (10 samples) + min=9578, max=9678 +current rps: 9651 +Wakeup Latencies percentiles (usec) runtime 30 (s) (291602 total samples) + 50.0th: 2596 (87469 samples) + 90.0th: 13520 (116597 samples) + * 99.0th: 26592 (26231 samples) + 99.9th: 66176 (2627 samples) + min=1, max=92966 +Request Latencies percentiles (usec) runtime 30 (s) (291532 total samples) + 50.0th: 7608 (81242 samples) + 90.0th: 16544 (117153 samples) + * 99.0th: 35264 (25687 samples) + 99.9th: 70016 (2564 samples) + min=2422, max=95939 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (17 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9680 (14 samples) + min=9578, max=9678 +current rps: 9666 +Wakeup Latencies percentiles (usec) runtime 30 (s) (291704 total samples) + 50.0th: 2596 (87476 samples) + 90.0th: 13520 (116664 samples) + * 99.0th: 26592 (26255 samples) + 99.9th: 66176 (2627 samples) + min=1, max=92966 +Request Latencies percentiles (usec) runtime 30 (s) (291727 total samples) + 50.0th: 7608 (81295 samples) + 90.0th: 16544 (117251 samples) + * 99.0th: 35264 (25698 samples) + 99.9th: 70016 (2564 samples) + min=2422, max=95939 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9648 (17 samples) + * 50.0th: 9648 (0 samples) + 90.0th: 9680 (14 samples) + min=9578, max=9678 +average rps: 9724 
+message_threads 1 +worker_threads 192 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/208.txt b/paper_results/schbench/linux_rr/208.txt new file mode 100644 index 0000000..1c07712 --- /dev/null +++ b/paper_results/schbench/linux_rr/208.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97396 total samples) + 50.0th: 3372 (29235 samples) + 90.0th: 14992 (38980 samples) + * 99.0th: 28256 (8734 samples) + 99.9th: 66944 (871 samples) + min=1, max=98324 +Request Latencies percentiles (usec) runtime 10 (s) (97314 total samples) + 50.0th: 9328 (28891 samples) + 90.0th: 18784 (39639 samples) + * 99.0th: 37056 (7960 samples) + 99.9th: 70016 (877 samples) + min=2423, max=100798 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9680 (11 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9605, max=9686 +current rps: 9686 +Wakeup Latencies percentiles (usec) runtime 20 (s) (194908 total samples) + 50.0th: 3276 (58456 samples) + 90.0th: 14928 (78020 samples) + * 99.0th: 28640 (17479 samples) + 99.9th: 70016 (1748 samples) + min=1, max=98324 +Request Latencies percentiles (usec) runtime 20 (s) (194804 total samples) + 50.0th: 9328 (57727 samples) + 90.0th: 18784 (79230 samples) + * 99.0th: 37568 (15802 samples) + 99.9th: 71808 (1752 samples) + min=2423, max=100798 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9680 (21 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9605, max=9689 +current rps: 9689 +Wakeup Latencies percentiles (usec) runtime 30 (s) (292371 total samples) + 50.0th: 3236 (87725 samples) + 90.0th: 14896 (116862 samples) + * 99.0th: 29472 (26285 samples) + 99.9th: 70272 (2625 samples) + min=1, max=98324 +Request Latencies percentiles (usec) runtime 30 (s) (292276 total samples) + 50.0th: 9328 (87019 samples) + 90.0th: 18784 (119033 samples) + * 99.0th: 37696 (23484 samples) + 99.9th: 72576 (2618 samples) + min=2421, max=100798 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (31 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9605, max=9689 +current rps: 9680 +Wakeup Latencies percentiles (usec) runtime 30 (s) (292471 total samples) + 50.0th: 3236 (87742 samples) + 90.0th: 14896 (116930 samples) + * 99.0th: 29472 (26297 samples) + 99.9th: 70272 (2625 samples) + min=1, max=98324 +Request Latencies percentiles (usec) runtime 30 (s) (292491 total samples) + 50.0th: 9328 (87082 samples) + 90.0th: 18784 (119123 samples) + * 99.0th: 37696 (23501 samples) + 99.9th: 72576 (2618 samples) + min=2421, max=100798 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (31 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9605, max=9689 +average rps: 9750 +message_threads 1 +worker_threads 208 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/224.txt b/paper_results/schbench/linux_rr/224.txt new file mode 100644 index 0000000..b4ece9a --- /dev/null +++ b/paper_results/schbench/linux_rr/224.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97242 total samples) + 50.0th: 4120 (29204 samples) + 90.0th: 16416 (38864 samples) + * 99.0th: 30752 (8737 samples) + 99.9th: 71552 (876 samples) + min=1, max=88831 +Request Latencies percentiles (usec) runtime 10 (s) (97126 total samples) + 50.0th: 9392 (29696 samples) + 90.0th: 20320 (37147 samples) + * 99.0th: 40000 (8740 samples) + 99.9th: 74624 (862 samples) + min=2423, max=93085 
+RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9616 (3 samples) + * 50.0th: 9648 (3 samples) + 90.0th: 9680 (4 samples) + min=9222, max=9696 +current rps: 9222 +Wakeup Latencies percentiles (usec) runtime 20 (s) (194525 total samples) + 50.0th: 4044 (58372 samples) + 90.0th: 16416 (77972 samples) + * 99.0th: 31648 (17306 samples) + 99.9th: 73600 (1756 samples) + min=1, max=93224 +Request Latencies percentiles (usec) runtime 20 (s) (194451 total samples) + 50.0th: 9392 (59378 samples) + 90.0th: 20000 (74085 samples) + * 99.0th: 46912 (17488 samples) + 99.9th: 76672 (1749 samples) + min=2423, max=99144 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9648 (6 samples) + * 50.0th: 9680 (13 samples) + 90.0th: 9680 (0 samples) + min=9222, max=9696 +current rps: 9688 +Wakeup Latencies percentiles (usec) runtime 30 (s) (291595 total samples) + 50.0th: 3940 (87502 samples) + 90.0th: 16416 (116792 samples) + * 99.0th: 32160 (26045 samples) + 99.9th: 73600 (2611 samples) + min=1, max=98139 +Request Latencies percentiles (usec) runtime 30 (s) (291508 total samples) + 50.0th: 9392 (88921 samples) + 90.0th: 19936 (111089 samples) + * 99.0th: 42432 (26211 samples) + 99.9th: 76928 (2629 samples) + min=2421, max=99144 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (29 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9222, max=9696 +current rps: 9675 +Wakeup Latencies percentiles (usec) runtime 30 (s) (291712 total samples) + 50.0th: 3940 (87529 samples) + 90.0th: 16416 (116838 samples) + * 99.0th: 32096 (26041 samples) + 99.9th: 73600 (2630 samples) + min=1, max=98139 +Request Latencies percentiles (usec) runtime 30 (s) (291734 total samples) + 50.0th: 9392 (88977 samples) + 90.0th: 19936 (111175 samples) + * 99.0th: 42432 (26244 samples) + 99.9th: 76928 (2629 samples) + min=2421, max=99144 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (29 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9680 (0 samples) + min=9222, max=9696 +average rps: 9724 +message_threads 1 +worker_threads 224 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/24.txt b/paper_results/schbench/linux_rr/24.txt new file mode 100644 index 0000000..eedb069 --- /dev/null +++ b/paper_results/schbench/linux_rr/24.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96386 total samples) + 50.0th: 6 (35218 samples) + 90.0th: 10 (20674 samples) + * 99.0th: 16 (7234 samples) + 99.9th: 54 (667 samples) + min=1, max=17128 +Request Latencies percentiles (usec) runtime 10 (s) (97196 total samples) + 50.0th: 2436 (34901 samples) + 90.0th: 2444 (15300 samples) + * 99.0th: 2476 (1849 samples) + 99.9th: 4696 (714 samples) + min=2414, max=11952 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9648 (3 samples) + * 50.0th: 9744 (8 samples) + 90.0th: 9744 (0 samples) + min=9605, max=9753 +current rps: 9741 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192926 total samples) + 50.0th: 6 (71016 samples) + 90.0th: 10 (42645 samples) + * 99.0th: 16 (15043 samples) + 99.9th: 28 (1412 samples) + min=1, max=17128 +Request Latencies percentiles (usec) runtime 20 (s) (194648 total samples) + 50.0th: 2436 (68740 samples) + 90.0th: 2444 (30204 samples) + * 99.0th: 2468 (2782 samples) + 99.9th: 4680 (1359 samples) + min=2414, max=11952 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9744 (21 samples) + * 50.0th: 9744 (0 samples) + 90.0th: 9744 (0 
samples) + min=9605, max=9753 +current rps: 9748 +Wakeup Latencies percentiles (usec) runtime 30 (s) (289481 total samples) + 50.0th: 6 (108307 samples) + 90.0th: 10 (63333 samples) + * 99.0th: 16 (22310 samples) + 99.9th: 26 (2072 samples) + min=1, max=17128 +Request Latencies percentiles (usec) runtime 30 (s) (292100 total samples) + 50.0th: 2436 (102030 samples) + 90.0th: 2444 (44629 samples) + * 99.0th: 2460 (3303 samples) + 99.9th: 4680 (2420 samples) + min=2414, max=12263 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9744 (31 samples) + * 50.0th: 9744 (0 samples) + 90.0th: 9744 (0 samples) + min=9605, max=9753 +current rps: 9744 +Wakeup Latencies percentiles (usec) runtime 30 (s) (289481 total samples) + 50.0th: 6 (108307 samples) + 90.0th: 10 (63333 samples) + * 99.0th: 16 (22310 samples) + 99.9th: 26 (2072 samples) + min=1, max=17128 +Request Latencies percentiles (usec) runtime 30 (s) (292124 total samples) + 50.0th: 2436 (102034 samples) + 90.0th: 2444 (44634 samples) + * 99.0th: 2460 (3307 samples) + 99.9th: 4680 (2426 samples) + min=2414, max=12263 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9744 (31 samples) + * 50.0th: 9744 (0 samples) + 90.0th: 9744 (0 samples) + min=9605, max=9753 +average rps: 9737 +message_threads 1 +worker_threads 24 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/240.txt b/paper_results/schbench/linux_rr/240.txt new file mode 100644 index 0000000..ec1fe14 --- /dev/null +++ b/paper_results/schbench/linux_rr/240.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97675 total samples) + 50.0th: 4376 (29307 samples) + 90.0th: 17632 (39088 samples) + * 99.0th: 35136 (8763 samples) + 99.9th: 70528 (877 samples) + min=1, max=89013 +Request Latencies percentiles (usec) runtime 10 (s) (97580 total samples) + 50.0th: 9424 (30434 samples) + 90.0th: 21088 (37702 samples) + * 99.0th: 53824 (8725 samples) + 99.9th: 76672 (880 samples) + min=2423, max=103033 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9680 (6 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9712 (5 samples) + min=9632, max=9707 +current rps: 9700 +Wakeup Latencies percentiles (usec) runtime 20 (s) (195636 total samples) + 50.0th: 4488 (58678 samples) + 90.0th: 17824 (78270 samples) + * 99.0th: 35008 (17557 samples) + 99.9th: 73088 (1743 samples) + min=1, max=89013 +Request Latencies percentiles (usec) runtime 20 (s) (195522 total samples) + 50.0th: 9424 (61119 samples) + 90.0th: 21152 (76464 samples) + * 99.0th: 54208 (16558 samples) + 99.9th: 76928 (1765 samples) + min=2423, max=103033 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9680 (9 samples) + * 50.0th: 9712 (12 samples) + 90.0th: 9712 (0 samples) + min=9632, max=9707 +current rps: 9702 +Wakeup Latencies percentiles (usec) runtime 30 (s) (294142 total samples) + 50.0th: 4504 (88221 samples) + 90.0th: 17760 (117498 samples) + * 99.0th: 34368 (26464 samples) + 99.9th: 73344 (2645 samples) + min=1, max=89013 +Request Latencies percentiles (usec) runtime 30 (s) (294054 total samples) + 50.0th: 9424 (92387 samples) + 90.0th: 21152 (114528 samples) + * 99.0th: 54592 (24933 samples) + 99.9th: 77440 (2647 samples) + min=2423, max=103033 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (9 samples) + * 50.0th: 9712 (22 samples) + 90.0th: 9712 (0 samples) + min=9632, max=9714 +current rps: 9698 +Wakeup Latencies percentiles (usec) runtime 30 (s) (294287 total samples) + 
50.0th: 4504 (88242 samples) + 90.0th: 17824 (117870 samples) + * 99.0th: 34496 (26208 samples) + 99.9th: 73344 (2630 samples) + min=1, max=89013 +Request Latencies percentiles (usec) runtime 30 (s) (294304 total samples) + 50.0th: 9424 (92463 samples) + 90.0th: 21152 (114634 samples) + * 99.0th: 54592 (24955 samples) + 99.9th: 77440 (2647 samples) + min=2423, max=103033 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (9 samples) + * 50.0th: 9712 (22 samples) + 90.0th: 9712 (0 samples) + min=9632, max=9714 +average rps: 9810 +message_threads 1 +worker_threads 240 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/256.txt b/paper_results/schbench/linux_rr/256.txt new file mode 100644 index 0000000..42a3563 --- /dev/null +++ b/paper_results/schbench/linux_rr/256.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (98090 total samples) + 50.0th: 5208 (29416 samples) + 90.0th: 19040 (39295 samples) + * 99.0th: 34496 (8771 samples) + 99.9th: 73600 (875 samples) + min=1, max=92558 +Request Latencies percentiles (usec) runtime 10 (s) (98013 total samples) + 50.0th: 9680 (29355 samples) + 90.0th: 23328 (39328 samples) + * 99.0th: 53440 (8622 samples) + 99.9th: 78976 (890 samples) + min=2427, max=97715 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9680 (7 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9712 (4 samples) + min=9612, max=9706 +current rps: 9694 +Wakeup Latencies percentiles (usec) runtime 20 (s) (196316 total samples) + 50.0th: 4952 (58900 samples) + 90.0th: 18784 (78579 samples) + * 99.0th: 35264 (17610 samples) + 99.9th: 75904 (1763 samples) + min=1, max=100081 +Request Latencies percentiles (usec) runtime 20 (s) (196189 total samples) + 50.0th: 9744 (58623 samples) + 90.0th: 23200 (78431 samples) + * 99.0th: 55744 (17611 samples) + 99.9th: 79488 (1771 samples) + min=2422, max=107480 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9680 (12 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9712 (9 samples) + min=9612, max=9707 +current rps: 9685 +Wakeup Latencies percentiles (usec) runtime 30 (s) (294055 total samples) + 50.0th: 4888 (88120 samples) + 90.0th: 18720 (117833 samples) + * 99.0th: 35392 (26267 samples) + 99.9th: 74368 (2633 samples) + min=1, max=100081 +Request Latencies percentiles (usec) runtime 30 (s) (293990 total samples) + 50.0th: 9744 (87495 samples) + 90.0th: 23072 (117506 samples) + * 99.0th: 56128 (26391 samples) + 99.9th: 79232 (2641 samples) + min=2422, max=107480 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (22 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9712 (9 samples) + min=9612, max=9707 +current rps: 9692 +Wakeup Latencies percentiles (usec) runtime 30 (s) (294235 total samples) + 50.0th: 4888 (88200 samples) + 90.0th: 18720 (117889 samples) + * 99.0th: 35392 (26267 samples) + 99.9th: 74368 (2633 samples) + min=1, max=100081 +Request Latencies percentiles (usec) runtime 30 (s) (294253 total samples) + 50.0th: 9744 (87573 samples) + 90.0th: 23008 (117555 samples) + * 99.0th: 56128 (26486 samples) + 99.9th: 79232 (2641 samples) + min=2422, max=107480 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (22 samples) + * 50.0th: 9680 (0 samples) + 90.0th: 9712 (9 samples) + min=9612, max=9707 +average rps: 9808 +message_threads 1 +worker_threads 256 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/32.txt b/paper_results/schbench/linux_rr/32.txt new 
file mode 100644 index 0000000..30e3a9e --- /dev/null +++ b/paper_results/schbench/linux_rr/32.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96091 total samples) + 50.0th: 34 (28288 samples) + 90.0th: 196 (37398 samples) + * 99.0th: 1070 (8641 samples) + 99.9th: 1738 (854 samples) + min=1, max=48551 +Request Latencies percentiles (usec) runtime 10 (s) (96183 total samples) + 50.0th: 2732 (28581 samples) + 90.0th: 3364 (38569 samples) + * 99.0th: 4408 (8476 samples) + 99.9th: 49216 (855 samples) + min=2422, max=52382 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9616 (10 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9540, max=9637 +current rps: 9618 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192231 total samples) + 50.0th: 33 (58001 samples) + 90.0th: 205 (76513 samples) + * 99.0th: 1078 (17277 samples) + 99.9th: 1810 (1735 samples) + min=1, max=49782 +Request Latencies percentiles (usec) runtime 20 (s) (192437 total samples) + 50.0th: 2740 (58605 samples) + 90.0th: 3348 (75984 samples) + * 99.0th: 4312 (17198 samples) + 99.9th: 49472 (1712 samples) + min=2422, max=52927 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9616 (19 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9540, max=9638 +current rps: 9625 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288339 total samples) + 50.0th: 33 (87038 samples) + 90.0th: 206 (114604 samples) + * 99.0th: 1090 (25956 samples) + 99.9th: 1810 (2563 samples) + min=1, max=51011 +Request Latencies percentiles (usec) runtime 30 (s) (288678 total samples) + 50.0th: 2732 (86144 samples) + 90.0th: 3348 (115449 samples) + * 99.0th: 4296 (25992 samples) + 99.9th: 49600 (2588 samples) + min=2421, max=52927 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (28 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9540, max=9639 +current rps: 9615 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288340 total samples) + 50.0th: 33 (87038 samples) + 90.0th: 206 (114604 samples) + * 99.0th: 1090 (25956 samples) + 99.9th: 1810 (2563 samples) + min=1, max=51011 +Request Latencies percentiles (usec) runtime 30 (s) (288711 total samples) + 50.0th: 2732 (86155 samples) + 90.0th: 3348 (115466 samples) + * 99.0th: 4296 (25994 samples) + 99.9th: 49600 (2588 samples) + min=2421, max=52927 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (28 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9540, max=9639 +average rps: 9624 +message_threads 1 +worker_threads 32 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/4.txt b/paper_results/schbench/linux_rr/4.txt new file mode 100644 index 0000000..a093de3 --- /dev/null +++ b/paper_results/schbench/linux_rr/4.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (16190 total samples) + 50.0th: 6 (10114 samples) + 90.0th: 7 (1708 samples) + * 99.0th: 20 (547 samples) + 99.9th: 4120 (124 samples) + min=1, max=8386 +Request Latencies percentiles (usec) runtime 10 (s) (16204 total samples) + 50.0th: 2436 (11928 samples) + 90.0th: 2436 (0 samples) + * 99.0th: 2612 (645 samples) + 99.9th: 4664 (148 samples) + min=2416, max=9189 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 1614 (5 samples) + * 50.0th: 1618 (1 samples) + 90.0th: 1630 (5 samples) + min=1607, max=1629 +current rps: 1629 +Wakeup Latencies percentiles (usec) runtime 20 (s) 
(32423 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (4300 samples) + * 99.0th: 14 (1038 samples) + 99.9th: 4296 (292 samples) + min=1, max=8386 +Request Latencies percentiles (usec) runtime 20 (s) (32462 total samples) + 50.0th: 2436 (19330 samples) + 90.0th: 2436 (0 samples) + * 99.0th: 2468 (883 samples) + 99.9th: 4664 (287 samples) + min=2416, max=9225 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 1614 (6 samples) + * 50.0th: 1626 (6 samples) + 90.0th: 1630 (9 samples) + min=1607, max=1630 +current rps: 1627 +Wakeup Latencies percentiles (usec) runtime 30 (s) (48682 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (7350 samples) + * 99.0th: 10 (2053 samples) + 99.9th: 4152 (375 samples) + min=1, max=8386 +Request Latencies percentiles (usec) runtime 30 (s) (48752 total samples) + 50.0th: 2436 (27503 samples) + 90.0th: 2436 (0 samples) + * 99.0th: 2460 (1027 samples) + 99.9th: 4664 (378 samples) + min=2416, max=9225 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 1618 (7 samples) + * 50.0th: 1630 (23 samples) + 90.0th: 1630 (0 samples) + min=1607, max=1632 +current rps: 1629 +Wakeup Latencies percentiles (usec) runtime 30 (s) (48682 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (7350 samples) + * 99.0th: 10 (2053 samples) + 99.9th: 4152 (375 samples) + min=1, max=8386 +Request Latencies percentiles (usec) runtime 30 (s) (48756 total samples) + 50.0th: 2436 (27504 samples) + 90.0th: 2436 (0 samples) + * 99.0th: 2460 (1028 samples) + 99.9th: 4664 (379 samples) + min=2416, max=9225 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 1618 (7 samples) + * 50.0th: 1630 (23 samples) + 90.0th: 1630 (0 samples) + min=1607, max=1632 +average rps: 1625 +message_threads 1 +worker_threads 4 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/40.txt b/paper_results/schbench/linux_rr/40.txt new file mode 100644 index 0000000..c4c1a6a --- /dev/null +++ b/paper_results/schbench/linux_rr/40.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96188 total samples) + 50.0th: 69 (28820 samples) + 90.0th: 741 (38029 samples) + * 99.0th: 2068 (8663 samples) + 99.9th: 3412 (846 samples) + min=1, max=51628 +Request Latencies percentiles (usec) runtime 10 (s) (96223 total samples) + 50.0th: 3068 (28697 samples) + 90.0th: 4296 (38185 samples) + * 99.0th: 6024 (8625 samples) + 99.9th: 52416 (870 samples) + min=2419, max=56434 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9616 (8 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9648 (3 samples) + min=9548, max=9634 +current rps: 9634 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192402 total samples) + 50.0th: 68 (56983 samples) + 90.0th: 725 (76582 samples) + * 99.0th: 2052 (17308 samples) + 99.9th: 3260 (1696 samples) + min=1, max=51628 +Request Latencies percentiles (usec) runtime 20 (s) (192508 total samples) + 50.0th: 3060 (56748 samples) + 90.0th: 4296 (77374 samples) + * 99.0th: 5976 (16915 samples) + 99.9th: 52672 (1721 samples) + min=2419, max=57284 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9616 (17 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9648 (4 samples) + min=9548, max=9634 +current rps: 9634 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288677 total samples) + 50.0th: 68 (86060 samples) + 90.0th: 713 (114355 samples) + * 99.0th: 2052 (26043 samples) + 99.9th: 3236 (2533 samples) + min=1, max=52592 +Request Latencies percentiles (usec) runtime 30 (s) 
(288842 total samples) + 50.0th: 3068 (86100 samples) + 90.0th: 4280 (114935 samples) + * 99.0th: 5944 (25569 samples) + 99.9th: 52800 (2578 samples) + min=2419, max=57284 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (22 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9648 (9 samples) + min=9548, max=9636 +current rps: 9630 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288682 total samples) + 50.0th: 68 (86061 samples) + 90.0th: 713 (114356 samples) + * 99.0th: 2052 (26043 samples) + 99.9th: 3236 (2534 samples) + min=1, max=52592 +Request Latencies percentiles (usec) runtime 30 (s) (288885 total samples) + 50.0th: 3068 (86112 samples) + 90.0th: 4280 (114951 samples) + * 99.0th: 5944 (25574 samples) + 99.9th: 52800 (2578 samples) + min=2419, max=57284 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (22 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9648 (9 samples) + min=9548, max=9636 +average rps: 9630 +message_threads 1 +worker_threads 40 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/48.txt b/paper_results/schbench/linux_rr/48.txt new file mode 100644 index 0000000..f504a2e --- /dev/null +++ b/paper_results/schbench/linux_rr/48.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96222 total samples) + 50.0th: 107 (28861 samples) + 90.0th: 1366 (38348 samples) + * 99.0th: 2996 (8657 samples) + 99.9th: 6664 (863 samples) + min=1, max=55508 +Request Latencies percentiles (usec) runtime 10 (s) (96252 total samples) + 50.0th: 3364 (28947 samples) + 90.0th: 4824 (38562 samples) + * 99.0th: 7976 (8372 samples) + 99.9th: 53568 (869 samples) + min=2422, max=60163 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9584 (3 samples) + * 50.0th: 9616 (4 samples) + 90.0th: 9648 (4 samples) + min=9557, max=9638 +current rps: 9637 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192517 total samples) + 50.0th: 108 (57484 samples) + 90.0th: 1354 (76696 samples) + * 99.0th: 2996 (17324 samples) + 99.9th: 6600 (1734 samples) + min=1, max=56972 +Request Latencies percentiles (usec) runtime 20 (s) (192593 total samples) + 50.0th: 3356 (57822 samples) + 90.0th: 4824 (77639 samples) + * 99.0th: 7848 (16377 samples) + 99.9th: 54080 (1732 samples) + min=2422, max=61939 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9616 (11 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9648 (10 samples) + min=9557, max=9642 +current rps: 9622 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288834 total samples) + 50.0th: 108 (86567 samples) + 90.0th: 1350 (115077 samples) + * 99.0th: 3036 (25942 samples) + 99.9th: 6648 (2589 samples) + min=1, max=56972 +Request Latencies percentiles (usec) runtime 30 (s) (288963 total samples) + 50.0th: 3356 (86665 samples) + 90.0th: 4808 (115225 samples) + * 99.0th: 7800 (26003 samples) + 99.9th: 54080 (2620 samples) + min=2422, max=61939 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (14 samples) + * 50.0th: 9648 (17 samples) + 90.0th: 9648 (0 samples) + min=9557, max=9643 +current rps: 9643 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288853 total samples) + 50.0th: 108 (86573 samples) + 90.0th: 1350 (115087 samples) + * 99.0th: 3036 (25945 samples) + 99.9th: 6648 (2589 samples) + min=1, max=56972 +Request Latencies percentiles (usec) runtime 30 (s) (289012 total samples) + 50.0th: 3356 (86682 samples) + 90.0th: 4808 (115246 samples) + * 99.0th: 7800 (26007 samples) + 99.9th: 
54080 (2620 samples) + min=2422, max=61939 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (14 samples) + * 50.0th: 9648 (17 samples) + 90.0th: 9648 (0 samples) + min=9557, max=9643 +average rps: 9634 +message_threads 1 +worker_threads 48 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/56.txt b/paper_results/schbench/linux_rr/56.txt new file mode 100644 index 0000000..e43fec7 --- /dev/null +++ b/paper_results/schbench/linux_rr/56.txt @@ -0,0 +1,36 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96816 total samples) + 50.0th: 174 (28909 samples) + 90.0th: 1934 (38725 samples) + * 99.0th: 4632 (8716 samples) + 99.9th: 9200 (863 samples) + min=1, max=56560 +Request Latencies percentiles (usec) runtime 10 (s) (96821 total samples) + 50.0th: 3628 (29108 samples) + 90.0th: 5528 (38633 samples) + * 99.0th: 9584 (8747 samples) + 99.9th: 53952 (800 samples) + min=2422, max=61882 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9616 (10 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9616, max=10094 +current rps: 9623 +Wakeup Latencies percentiles (usec) runtime 10 (s) (96836 total samples) + 50.0th: 174 (28911 samples) + 90.0th: 1934 (38733 samples) + * 99.0th: 4632 (8722 samples) + 99.9th: 9200 (864 samples) + min=1, max=56560 +Request Latencies percentiles (usec) runtime 10 (s) (96879 total samples) + 50.0th: 3628 (29129 samples) + 90.0th: 5528 (38646 samples) + * 99.0th: 9584 (8752 samples) + 99.9th: 53952 (808 samples) + min=2422, max=61882 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9616 (10 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9616, max=10094 +average rps: 9688 diff --git a/paper_results/schbench/linux_rr/64.txt b/paper_results/schbench/linux_rr/64.txt new file mode 100644 index 0000000..4d75a9b --- /dev/null +++ b/paper_results/schbench/linux_rr/64.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (95616 total samples) + 50.0th: 344 (28646 samples) + 90.0th: 2348 (38284 samples) + * 99.0th: 6600 (8508 samples) + 99.9th: 14576 (857 samples) + min=1, max=61362 +Request Latencies percentiles (usec) runtime 10 (s) (95593 total samples) + 50.0th: 3924 (28512 samples) + 90.0th: 6728 (38170 samples) + * 99.0th: 11952 (8570 samples) + 99.9th: 55104 (868 samples) + min=2424, max=63954 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9520 (3 samples) + * 50.0th: 9552 (6 samples) + 90.0th: 9584 (2 samples) + min=9496, max=9573 +current rps: 9558 +Wakeup Latencies percentiles (usec) runtime 20 (s) (191328 total samples) + 50.0th: 347 (56873 samples) + 90.0th: 2348 (76658 samples) + * 99.0th: 6664 (17077 samples) + 99.9th: 17696 (1709 samples) + min=1, max=65365 +Request Latencies percentiles (usec) runtime 20 (s) (191344 total samples) + 50.0th: 3908 (57440 samples) + 90.0th: 6664 (76529 samples) + * 99.0th: 11984 (17260 samples) + 99.9th: 55616 (1674 samples) + min=2421, max=68349 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9552 (17 samples) + * 50.0th: 9552 (0 samples) + 90.0th: 9584 (4 samples) + min=9496, max=9573 +current rps: 9559 +Wakeup Latencies percentiles (usec) runtime 30 (s) (287039 total samples) + 50.0th: 344 (85715 samples) + 90.0th: 2348 (115007 samples) + * 99.0th: 6680 (25574 samples) + 99.9th: 15280 (2562 samples) + min=1, max=65365 +Request Latencies percentiles (usec) runtime 30 (s) (287071 total samples) + 50.0th: 3908 (86192 samples) + 
90.0th: 6664 (114759 samples) + * 99.0th: 11984 (25850 samples) + 99.9th: 55744 (2527 samples) + min=2420, max=68349 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9552 (25 samples) + * 50.0th: 9552 (0 samples) + 90.0th: 9584 (6 samples) + min=9496, max=9579 +current rps: 9568 +Wakeup Latencies percentiles (usec) runtime 30 (s) (287053 total samples) + 50.0th: 344 (85717 samples) + 90.0th: 2348 (115010 samples) + * 99.0th: 6680 (25575 samples) + 99.9th: 15280 (2562 samples) + min=1, max=65365 +Request Latencies percentiles (usec) runtime 30 (s) (287138 total samples) + 50.0th: 3908 (86210 samples) + 90.0th: 6664 (114780 samples) + * 99.0th: 11984 (25861 samples) + 99.9th: 55744 (2527 samples) + min=2420, max=68349 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9552 (25 samples) + * 50.0th: 9552 (0 samples) + 90.0th: 9584 (6 samples) + min=9496, max=9579 +average rps: 9571 +message_threads 1 +worker_threads 64 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/72.txt b/paper_results/schbench/linux_rr/72.txt new file mode 100644 index 0000000..dec8ab3 --- /dev/null +++ b/paper_results/schbench/linux_rr/72.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (95975 total samples) + 50.0th: 474 (28781 samples) + 90.0th: 2988 (38389 samples) + * 99.0th: 8528 (8638 samples) + 99.9th: 50496 (861 samples) + min=1, max=64772 +Request Latencies percentiles (usec) runtime 10 (s) (95955 total samples) + 50.0th: 4248 (28701 samples) + 90.0th: 7160 (38758 samples) + * 99.0th: 14320 (8255 samples) + 99.9th: 55744 (868 samples) + min=2423, max=65142 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9584 (7 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9616 (4 samples) + min=9479, max=9619 +current rps: 9600 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192278 total samples) + 50.0th: 490 (57656 samples) + 90.0th: 2972 (76833 samples) + * 99.0th: 8400 (17302 samples) + 99.9th: 50368 (1733 samples) + min=1, max=70418 +Request Latencies percentiles (usec) runtime 20 (s) (192294 total samples) + 50.0th: 4248 (57729 samples) + 90.0th: 7144 (77083 samples) + * 99.0th: 14320 (17153 samples) + 99.9th: 56128 (1657 samples) + min=2419, max=76397 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9584 (11 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9616 (10 samples) + min=9479, max=9619 +current rps: 9600 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288427 total samples) + 50.0th: 491 (86558 samples) + 90.0th: 2972 (115364 samples) + * 99.0th: 8336 (25936 samples) + 99.9th: 51008 (2590 samples) + min=1, max=70418 +Request Latencies percentiles (usec) runtime 30 (s) (288465 total samples) + 50.0th: 4264 (87038 samples) + 90.0th: 7144 (115223 samples) + * 99.0th: 14288 (25466 samples) + 99.9th: 56384 (2569 samples) + min=2419, max=76397 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9584 (17 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9616 (14 samples) + min=9479, max=9619 +current rps: 9592 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288457 total samples) + 50.0th: 491 (86563 samples) + 90.0th: 2972 (115376 samples) + * 99.0th: 8336 (25939 samples) + 99.9th: 51008 (2591 samples) + min=1, max=70418 +Request Latencies percentiles (usec) runtime 30 (s) (288537 total samples) + 50.0th: 4264 (87064 samples) + 90.0th: 7144 (115253 samples) + * 99.0th: 14288 (25474 samples) + 99.9th: 56384 (2569 samples) + min=2419, max=76397 +RPS 
percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9584 (17 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9616 (14 samples) + min=9479, max=9619 +average rps: 9618 +message_threads 1 +worker_threads 72 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/8.txt b/paper_results/schbench/linux_rr/8.txt new file mode 100644 index 0000000..4dc75ee --- /dev/null +++ b/paper_results/schbench/linux_rr/8.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (32493 total samples) + 50.0th: 6 (19729 samples) + 90.0th: 7 (4380 samples) + * 99.0th: 9 (875 samples) + 99.9th: 4216 (177 samples) + min=1, max=7914 +Request Latencies percentiles (usec) runtime 10 (s) (32544 total samples) + 50.0th: 2428 (9022 samples) + 90.0th: 2436 (13285 samples) + * 99.0th: 2468 (2609 samples) + 99.9th: 4696 (171 samples) + min=2415, max=9215 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 3252 (7 samples) + * 50.0th: 3252 (0 samples) + 90.0th: 3260 (4 samples) + min=3245, max=3261 +current rps: 3255 +Wakeup Latencies percentiles (usec) runtime 20 (s) (64941 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (10213 samples) + * 99.0th: 9 (2467 samples) + 99.9th: 4472 (461 samples) + min=1, max=7961 +Request Latencies percentiles (usec) runtime 20 (s) (65054 total samples) + 50.0th: 2436 (33082 samples) + 90.0th: 2444 (3183 samples) + * 99.0th: 2476 (3192 samples) + 99.9th: 4504 (169 samples) + min=2415, max=9215 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 3252 (17 samples) + * 50.0th: 3252 (0 samples) + 90.0th: 3260 (4 samples) + min=3245, max=3261 +current rps: 3252 +Wakeup Latencies percentiles (usec) runtime 30 (s) (97389 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (15037 samples) + * 99.0th: 9 (3856 samples) + 99.9th: 4616 (772 samples) + min=1, max=7961 +Request Latencies percentiles (usec) runtime 30 (s) (97559 total samples) + 50.0th: 2436 (48904 samples) + 90.0th: 2444 (5045 samples) + * 99.0th: 2476 (5524 samples) + 99.9th: 4456 (314 samples) + min=2415, max=11235 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 3252 (27 samples) + * 50.0th: 3252 (0 samples) + 90.0th: 3260 (4 samples) + min=3235, max=3261 +current rps: 3249 +Wakeup Latencies percentiles (usec) runtime 30 (s) (97389 total samples) + 50.0th: 6 (0 samples) + 90.0th: 7 (15037 samples) + * 99.0th: 9 (3856 samples) + 99.9th: 4616 (772 samples) + min=1, max=7961 +Request Latencies percentiles (usec) runtime 30 (s) (97567 total samples) + 50.0th: 2436 (48907 samples) + 90.0th: 2444 (5048 samples) + * 99.0th: 2476 (5524 samples) + 99.9th: 4456 (315 samples) + min=2415, max=11235 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 3252 (27 samples) + * 50.0th: 3252 (0 samples) + 90.0th: 3260 (4 samples) + min=3235, max=3261 +average rps: 3252 +message_threads 1 +worker_threads 8 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/80.txt b/paper_results/schbench/linux_rr/80.txt new file mode 100644 index 0000000..c019f99 --- /dev/null +++ b/paper_results/schbench/linux_rr/80.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (95853 total samples) + 50.0th: 675 (28761 samples) + 90.0th: 3940 (38319 samples) + * 99.0th: 9456 (8607 samples) + 99.9th: 52672 (857 samples) + min=1, max=62026 +Request Latencies percentiles (usec) runtime 10 (s) (95824 total samples) + 50.0th: 4696 (28932 samples) + 90.0th: 7544 (38126 samples) + * 99.0th: 15824 (8593 samples) 
+ 99.9th: 56640 (862 samples) + min=2422, max=67148 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9552 (7 samples) + * 50.0th: 9552 (0 samples) + 90.0th: 9584 (4 samples) + min=9510, max=9583 +current rps: 9578 +Wakeup Latencies percentiles (usec) runtime 20 (s) (191806 total samples) + 50.0th: 665 (57449 samples) + 90.0th: 3932 (76735 samples) + * 99.0th: 9456 (17258 samples) + 99.9th: 52160 (1696 samples) + min=1, max=62026 +Request Latencies percentiles (usec) runtime 20 (s) (191788 total samples) + 50.0th: 4696 (57863 samples) + 90.0th: 7496 (76187 samples) + * 99.0th: 15792 (17260 samples) + 99.9th: 57280 (1717 samples) + min=2422, max=67148 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9552 (9 samples) + * 50.0th: 9584 (12 samples) + 90.0th: 9584 (0 samples) + min=9510, max=9587 +current rps: 9587 +Wakeup Latencies percentiles (usec) runtime 30 (s) (287803 total samples) + 50.0th: 673 (86046 samples) + 90.0th: 3940 (115031 samples) + * 99.0th: 9552 (25896 samples) + 99.9th: 52288 (2593 samples) + min=1, max=68559 +Request Latencies percentiles (usec) runtime 30 (s) (287813 total samples) + 50.0th: 4696 (87598 samples) + 90.0th: 7512 (113762 samples) + * 99.0th: 15728 (25877 samples) + 99.9th: 57280 (2604 samples) + min=2422, max=75038 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9552 (11 samples) + * 50.0th: 9584 (20 samples) + 90.0th: 9584 (0 samples) + min=9510, max=9587 +current rps: 9557 +Wakeup Latencies percentiles (usec) runtime 30 (s) (287832 total samples) + 50.0th: 673 (86048 samples) + 90.0th: 3940 (115051 samples) + * 99.0th: 9552 (25902 samples) + 99.9th: 52288 (2593 samples) + min=1, max=68559 +Request Latencies percentiles (usec) runtime 30 (s) (287893 total samples) + 50.0th: 4696 (87609 samples) + 90.0th: 7512 (113801 samples) + * 99.0th: 15728 (25892 samples) + 99.9th: 57280 (2604 samples) + min=2422, max=75038 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9552 (11 samples) + * 50.0th: 9584 (20 samples) + 90.0th: 9584 (0 samples) + min=9510, max=9587 +average rps: 9596 +message_threads 1 +worker_threads 80 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/88.txt b/paper_results/schbench/linux_rr/88.txt new file mode 100644 index 0000000..5e9932a --- /dev/null +++ b/paper_results/schbench/linux_rr/88.txt @@ -0,0 +1,36 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96376 total samples) + 50.0th: 873 (28885 samples) + 90.0th: 4584 (38628 samples) + * 99.0th: 11280 (8571 samples) + 99.9th: 53056 (866 samples) + min=1, max=70056 +Request Latencies percentiles (usec) runtime 10 (s) (96345 total samples) + 50.0th: 4728 (30629 samples) + 90.0th: 8656 (36702 samples) + * 99.0th: 16736 (8676 samples) + 99.9th: 57792 (843 samples) + min=2422, max=73998 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9552 (3 samples) + * 50.0th: 9584 (3 samples) + 90.0th: 9616 (4 samples) + min=9478, max=10063 +current rps: 9600 +Wakeup Latencies percentiles (usec) runtime 10 (s) (96424 total samples) + 50.0th: 873 (28906 samples) + 90.0th: 4584 (38632 samples) + * 99.0th: 11280 (8578 samples) + 99.9th: 53056 (866 samples) + min=1, max=70056 +Request Latencies percentiles (usec) runtime 10 (s) (96441 total samples) + 50.0th: 4728 (30648 samples) + 90.0th: 8656 (36755 samples) + * 99.0th: 16736 (8684 samples) + 99.9th: 57792 (843 samples) + min=2422, max=73998 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 
9552 (3 samples) + * 50.0th: 9584 (3 samples) + 90.0th: 9616 (4 samples) + min=9478, max=10063 +average rps: 9644 diff --git a/paper_results/schbench/linux_rr/96.txt b/paper_results/schbench/linux_rr/96.txt new file mode 100644 index 0000000..651c512 --- /dev/null +++ b/paper_results/schbench/linux_rr/96.txt @@ -0,0 +1,76 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (96239 total samples) + 50.0th: 1046 (28852 samples) + 90.0th: 5192 (38437 samples) + * 99.0th: 12496 (8655 samples) + 99.9th: 53824 (866 samples) + min=1, max=65989 +Request Latencies percentiles (usec) runtime 10 (s) (96200 total samples) + 50.0th: 4744 (32108 samples) + 90.0th: 9456 (35725 samples) + * 99.0th: 18528 (8084 samples) + 99.9th: 58432 (865 samples) + min=2423, max=69831 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9584 (3 samples) + * 50.0th: 9616 (8 samples) + 90.0th: 9616 (0 samples) + min=9544, max=9615 +current rps: 9609 +Wakeup Latencies percentiles (usec) runtime 20 (s) (192536 total samples) + 50.0th: 1015 (57706 samples) + 90.0th: 5128 (77032 samples) + * 99.0th: 12560 (17260 samples) + 99.9th: 53184 (1731 samples) + min=1, max=68932 +Request Latencies percentiles (usec) runtime 20 (s) (192512 total samples) + 50.0th: 4744 (64551 samples) + 90.0th: 9456 (71470 samples) + * 99.0th: 19040 (16025 samples) + 99.9th: 59072 (1694 samples) + min=2419, max=71348 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9584 (5 samples) + * 50.0th: 9616 (16 samples) + 90.0th: 9616 (0 samples) + min=9544, max=9622 +current rps: 9598 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288900 total samples) + 50.0th: 1019 (86620 samples) + 90.0th: 5144 (115583 samples) + * 99.0th: 12624 (25878 samples) + 99.9th: 53568 (2593 samples) + min=1, max=84923 +Request Latencies percentiles (usec) runtime 30 (s) (288911 total samples) + 50.0th: 4744 (97308 samples) + 90.0th: 9456 (106890 samples) + * 99.0th: 19040 (23941 samples) + 99.9th: 59456 (2519 samples) + min=2419, max=88006 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (31 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9544, max=9623 +current rps: 9612 +Wakeup Latencies percentiles (usec) runtime 30 (s) (288956 total samples) + 50.0th: 1019 (86640 samples) + 90.0th: 5144 (115597 samples) + * 99.0th: 12624 (25885 samples) + 99.9th: 53568 (2600 samples) + min=1, max=84923 +Request Latencies percentiles (usec) runtime 30 (s) (289011 total samples) + 50.0th: 4744 (97337 samples) + 90.0th: 9456 (106927 samples) + * 99.0th: 19040 (23947 samples) + 99.9th: 59456 (2526 samples) + min=2419, max=88006 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9616 (31 samples) + * 50.0th: 9616 (0 samples) + 90.0th: 9616 (0 samples) + min=9544, max=9623 +average rps: 9634 +message_threads 1 +worker_threads 96 +operations 10 +matrix_size 73 diff --git a/paper_results/schbench/linux_rr/all.csv b/paper_results/schbench/linux_rr/all.csv new file mode 100644 index 0000000..930a667 --- /dev/null +++ b/paper_results/schbench/linux_rr/all.csv @@ -0,0 +1,24 @@ +cores,wake99,rps50,lat99 +4,10,1630,2460 +8,9,3252,2476 +16,10,6504,2452 +24,16,9744,2460 +32,1090,9616,4296 +40,2052,9616,5944 +48,3036,9648,7800 +56,4632,9616,9584 +64,6680,9552,11984 +72,8336,9584,14288 +80,9552,9584,15728 +88,11280,9584,16736 +96,12624,9616,19040 +112,14960,9616,21280 +128,18016,9648,23648 +144,19680,9648,27744 +160,22496,9680,28256 +176,24032,9680,30560 +192,26592,9648,35264 
+208,29472,9680,37696 +224,32096,9680,42432 +240,34496,9712,54592 +256,35392,9680,56128 diff --git a/paper_results/schbench/skyloft_cfs50us/16.txt b/paper_results/schbench/skyloft_cfs50us/16.txt new file mode 100644 index 0000000..3f67c96 --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/16.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 1 (s) (6237 total samples) + 50.0th: 2 (2978 samples) + 90.0th: 2 (0 samples) + * 99.0th: 3 (232 samples) + 99.9th: 16 (55 samples) + min=1, max=24 +Request Latencies percentiles (usec) runtime 1 (s) (6258 total samples) + 50.0th: 2516 (0 samples) + 90.0th: 2644 (2327 samples) + * 99.0th: 2644 (0 samples) + 99.9th: 2652 (4 samples) + min=2511, max=2747 +RPS percentiles (requests) runtime 1 (s) (2 total samples) + 20.0th: 0 (1 samples) + * 50.0th: 0 (0 samples) + 90.0th: 6248 (1 samples) + min=6241, max=6241 +average rps: 6258 diff --git a/paper_results/schbench/skyloft_cfs50us/24.txt b/paper_results/schbench/skyloft_cfs50us/24.txt new file mode 100644 index 0000000..d6b700b --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/24.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 1 (s) (9092 total samples) + 50.0th: 2 (4476 samples) + 90.0th: 2 (0 samples) + * 99.0th: 4 (318 samples) + 99.9th: 21 (31 samples) + min=1, max=33 +Request Latencies percentiles (usec) runtime 1 (s) (9344 total samples) + 50.0th: 2516 (0 samples) + 90.0th: 2644 (4093 samples) + * 99.0th: 2676 (404 samples) + 99.9th: 2764 (17 samples) + min=2511, max=3312 +RPS percentiles (requests) runtime 1 (s) (2 total samples) + 20.0th: 0 (1 samples) + * 50.0th: 0 (0 samples) + 90.0th: 9296 (1 samples) + min=9294, max=9294 +average rps: 9344 \ No newline at end of file diff --git a/paper_results/schbench/skyloft_cfs50us/32.txt b/paper_results/schbench/skyloft_cfs50us/32.txt new file mode 100644 index 0000000..ac27b78 --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/32.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 1 (s) (9333 total samples) + 50.0th: 2 (2991 samples) + 90.0th: 28 (3193 samples) + * 99.0th: 34 (661 samples) + 99.9th: 53 (36 samples) + min=1, max=61 +Request Latencies percentiles (usec) runtime 1 (s) (9590 total samples) + 50.0th: 2644 (1738 samples) + 90.0th: 4808 (3712 samples) + * 99.0th: 4808 (0 samples) + 99.9th: 4936 (50 samples) + min=2511, max=5239 +RPS percentiles (requests) runtime 1 (s) (2 total samples) + 20.0th: 0 (1 samples) + * 50.0th: 0 (0 samples) + 90.0th: 9520 (1 samples) + min=9530, max=9530 +average rps: 9590 \ No newline at end of file diff --git a/paper_results/schbench/skyloft_cfs50us/4.txt b/paper_results/schbench/skyloft_cfs50us/4.txt new file mode 100644 index 0000000..4d801c6 --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/4.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (15076 total samples) + 50.0th: 1 (0 samples) + 90.0th: 2 (5753 samples) + * 99.0th: 2 (0 samples) + 99.9th: 3 (48 samples) + min=1, max=6 +Request Latencies percentiles (usec) runtime 10 (s) (15089 total samples) + 50.0th: 2660 (0 samples) + 90.0th: 2660 (0 samples) + * 99.0th: 2668 (242 samples) + 99.9th: 2668 (0 samples) + min=2525, max=2722 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 1510 (11 samples) + * 50.0th: 1510 (0 samples) + 90.0th: 1510 (0 samples) + min=1506, max=1511 +average rps: 1509 diff --git a/paper_results/schbench/skyloft_cfs50us/40.txt b/paper_results/schbench/skyloft_cfs50us/40.txt new file mode 100644 index 
0000000..ecca6fb --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/40.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 1 (s) (9444 total samples) + 50.0th: 21 (1925 samples) + 90.0th: 31 (3857 samples) + * 99.0th: 34 (775 samples) + 99.9th: 51 (68 samples) + min=1, max=71 +Request Latencies percentiles (usec) runtime 1 (s) (9737 total samples) + 50.0th: 4808 (6786 samples) + 90.0th: 4808 (0 samples) + * 99.0th: 4856 (316 samples) + 99.9th: 5128 (37 samples) + min=2512, max=5270 +RPS percentiles (requests) runtime 1 (s) (2 total samples) + 20.0th: 0 (1 samples) + * 50.0th: 0 (0 samples) + 90.0th: 9648 (1 samples) + min=9660, max=9660 +average rps: 9737 \ No newline at end of file diff --git a/paper_results/schbench/skyloft_cfs50us/48.txt b/paper_results/schbench/skyloft_cfs50us/48.txt new file mode 100644 index 0000000..31d822e --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/48.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 1 (s) (9763 total samples) + 50.0th: 23 (2887 samples) + 90.0th: 30 (3867 samples) + * 99.0th: 35 (890 samples) + 99.9th: 71 (39 samples) + min=1, max=78 +Request Latencies percentiles (usec) runtime 1 (s) (9970 total samples) + 50.0th: 4808 (0 samples) + 90.0th: 4808 (0 samples) + * 99.0th: 5000 (677 samples) + 99.9th: 6008 (74 samples) + min=2602, max=7962 +RPS percentiles (requests) runtime 1 (s) (2 total samples) + 20.0th: 0 (1 samples) + * 50.0th: 0 (0 samples) + 90.0th: 9904 (1 samples) + min=9891, max=9891 +average rps: 9970 \ No newline at end of file diff --git a/paper_results/schbench/skyloft_cfs50us/56.txt b/paper_results/schbench/skyloft_cfs50us/56.txt new file mode 100644 index 0000000..f358591 --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/56.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 1 (s) (9719 total samples) + 50.0th: 23 (3181 samples) + 90.0th: 34 (3703 samples) + * 99.0th: 79 (805 samples) + 99.9th: 105 (71 samples) + min=1, max=113 +Request Latencies percentiles (usec) runtime 1 (s) (9918 total samples) + 50.0th: 4808 (0 samples) + 90.0th: 9680 (2385 samples) + * 99.0th: 9776 (843 samples) + 99.9th: 10096 (43 samples) + min=2512, max=16424 +RPS percentiles (requests) runtime 1 (s) (2 total samples) + 20.0th: 0 (1 samples) + * 50.0th: 0 (0 samples) + 90.0th: 9840 (1 samples) + min=9836, max=9836 +average rps: 9918 \ No newline at end of file diff --git a/paper_results/schbench/skyloft_cfs50us/64.txt b/paper_results/schbench/skyloft_cfs50us/64.txt new file mode 100644 index 0000000..c5491f1 --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/64.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 1 (s) (9704 total samples) + 50.0th: 20 (2900 samples) + 90.0th: 71 (3999 samples) + * 99.0th: 81 (746 samples) + 99.9th: 124 (88 samples) + min=1, max=132 +Request Latencies percentiles (usec) runtime 1 (s) (9896 total samples) + 50.0th: 7224 (1422 samples) + 90.0th: 9744 (4057 samples) + * 99.0th: 9776 (140 samples) + 99.9th: 10160 (76 samples) + min=2512, max=10266 +RPS percentiles (requests) runtime 1 (s) (2 total samples) + 20.0th: 0 (1 samples) + * 50.0th: 0 (0 samples) + 90.0th: 9808 (1 samples) + min=9799, max=9799 +average rps: 9896 \ No newline at end of file diff --git a/paper_results/schbench/skyloft_cfs50us/72.txt b/paper_results/schbench/skyloft_cfs50us/72.txt new file mode 100644 index 0000000..10137da --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/72.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 1 (s) 
(9733 total samples) + 50.0th: 15 (2542 samples) + 90.0th: 72 (3831 samples) + * 99.0th: 83 (786 samples) + 99.9th: 136 (81 samples) + min=1, max=144 +Request Latencies percentiles (usec) runtime 1 (s) (9855 total samples) + 50.0th: 7384 (3132 samples) + 90.0th: 9744 (4349 samples) + * 99.0th: 9872 (305 samples) + 99.9th: 10288 (86 samples) + min=2511, max=13044 +RPS percentiles (requests) runtime 1 (s) (2 total samples) + 20.0th: 0 (1 samples) + * 50.0th: 0 (0 samples) + 90.0th: 9776 (1 samples) + min=9760, max=9760 +average rps: 9855 \ No newline at end of file diff --git a/paper_results/schbench/skyloft_cfs50us/8.txt b/paper_results/schbench/skyloft_cfs50us/8.txt new file mode 100644 index 0000000..440e8c6 --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/8.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 1 (s) (3076 total samples) + 50.0th: 1 (0 samples) + 90.0th: 2 (1267 samples) + * 99.0th: 2 (0 samples) + 99.9th: 7 (15 samples) + min=1, max=7 +Request Latencies percentiles (usec) runtime 1 (s) (3076 total samples) + 50.0th: 2636 (851 samples) + 90.0th: 2644 (1376 samples) + * 99.0th: 2644 (0 samples) + 99.9th: 2644 (0 samples) + min=2511, max=2714 +RPS percentiles (requests) runtime 1 (s) (2 total samples) + 20.0th: 0 (1 samples) + * 50.0th: 0 (0 samples) + 90.0th: 3068 (1 samples) + min=3068, max=3068 +average rps: 3076 \ No newline at end of file diff --git a/paper_results/schbench/skyloft_cfs50us/80.txt b/paper_results/schbench/skyloft_cfs50us/80.txt new file mode 100644 index 0000000..a260981 --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/80.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 1 (s) (9718 total samples) + 50.0th: 17 (2792 samples) + 90.0th: 77 (3716 samples) + * 99.0th: 116 (751 samples) + 99.9th: 149 (76 samples) + min=1, max=179 +Request Latencies percentiles (usec) runtime 1 (s) (9857 total samples) + 50.0th: 9648 (3377 samples) + 90.0th: 12176 (3914 samples) + * 99.0th: 12240 (520 samples) + 99.9th: 12688 (58 samples) + min=2511, max=12818 +RPS percentiles (requests) runtime 1 (s) (2 total samples) + 20.0th: 0 (1 samples) + * 50.0th: 0 (0 samples) + 90.0th: 9744 (1 samples) + min=9740, max=9740 +average rps: 9857 \ No newline at end of file diff --git a/paper_results/schbench/skyloft_cfs50us/88.txt b/paper_results/schbench/skyloft_cfs50us/88.txt new file mode 100644 index 0000000..d424995 --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/88.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 1 (s) (9540 total samples) + 50.0th: 16 (2649 samples) + 90.0th: 104 (3872 samples) + * 99.0th: 120 (804 samples) + 99.9th: 187 (76 samples) + min=1, max=204 +Request Latencies percentiles (usec) runtime 1 (s) (9701 total samples) + 50.0th: 9744 (3081 samples) + 90.0th: 12208 (3758 samples) + * 99.0th: 20448 (704 samples) + 99.9th: 20512 (71 samples) + min=2512, max=20555 +RPS percentiles (requests) runtime 1 (s) (2 total samples) + 20.0th: 0 (1 samples) + * 50.0th: 0 (0 samples) + 90.0th: 9552 (1 samples) + min=9566, max=9566 +average rps: 9701 \ No newline at end of file diff --git a/paper_results/schbench/skyloft_cfs50us/96.txt b/paper_results/schbench/skyloft_cfs50us/96.txt new file mode 100644 index 0000000..a5cbd4d --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/96.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 1 (s) (9768 total samples) + 50.0th: 17 (2892 samples) + 90.0th: 109 (3981 samples) + * 99.0th: 121 (746 samples) + 99.9th: 206 (80 samples) + min=1, 
max=232 +Request Latencies percentiles (usec) runtime 1 (s) (9855 total samples) + 50.0th: 9968 (2561 samples) + 90.0th: 12208 (4093 samples) + * 99.0th: 12336 (738 samples) + 99.9th: 12816 (80 samples) + min=2512, max=13943 +RPS percentiles (requests) runtime 1 (s) (2 total samples) + 20.0th: 0 (1 samples) + * 50.0th: 0 (0 samples) + 90.0th: 9712 (1 samples) + min=9719, max=9719 +average rps: 9855 \ No newline at end of file diff --git a/paper_results/schbench/skyloft_cfs50us/all.csv b/paper_results/schbench/skyloft_cfs50us/all.csv new file mode 100644 index 0000000..3d47662 --- /dev/null +++ b/paper_results/schbench/skyloft_cfs50us/all.csv @@ -0,0 +1,14 @@ +cores,wake99,rps50,lat99 +4,2,1510,2668 +8,2,0,2644 +16,3,0,2644 +24,4,0,2676 +32,34,0,4808 +40,34,0,4856 +48,35,0,5000 +56,79,0,9776 +64,81,0,9776 +72,83,0,9872 +80,116,0,12240 +88,120,0,20448 +96,121,0,12336 diff --git a/paper_results/schbench/skyloft_fifo/112.txt b/paper_results/schbench/skyloft_fifo/112.txt new file mode 100644 index 0000000..f80a26f --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/112.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (102435 total samples) + 50.0th: 2564 (30819 samples) + 90.0th: 4424 (41129 samples) + * 99.0th: 6968 (9861 samples) + 99.9th: 7224 (33 samples) + min=1, max=11168 +Request Latencies percentiles (usec) runtime 10 (s) (102409 total samples) + 50.0th: 4744 (27003 samples) + 90.0th: 9360 (49116 samples) + * 99.0th: 9392 (978 samples) + 99.9th: 11792 (577 samples) + min=2411, max=15703 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10128 (3 samples) + * 50.0th: 10224 (3 samples) + 90.0th: 10256 (5 samples) + min=10087, max=10252 +current rps: 10252 +Wakeup Latencies percentiles (usec) runtime 20 (s) (204975 total samples) + 50.0th: 2540 (61544 samples) + 90.0th: 4392 (82237 samples) + * 99.0th: 6968 (19744 samples) + 99.9th: 7000 (21 samples) + min=1, max=11168 +Request Latencies percentiles (usec) runtime 20 (s) (204952 total samples) + 50.0th: 4744 (53368 samples) + 90.0th: 9360 (99284 samples) + * 99.0th: 9392 (1524 samples) + 99.9th: 11696 (929 samples) + min=2411, max=15703 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10128 (5 samples) + * 50.0th: 10256 (16 samples) + 90.0th: 10256 (0 samples) + min=10083, max=10254 +current rps: 10245 +Wakeup Latencies percentiles (usec) runtime 30 (s) (307916 total samples) + 50.0th: 2508 (89408 samples) + 90.0th: 4376 (123709 samples) + * 99.0th: 6952 (27177 samples) + 99.9th: 6968 (2832 samples) + min=1, max=11168 +Request Latencies percentiles (usec) runtime 30 (s) (307908 total samples) + 50.0th: 4744 (79346 samples) + 90.0th: 9360 (150139 samples) + * 99.0th: 9392 (1949 samples) + 99.9th: 11696 (1086 samples) + min=2411, max=15703 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10160 (7 samples) + * 50.0th: 10256 (24 samples) + 90.0th: 10256 (0 samples) + min=10083, max=10258 +current rps: 10249 +Wakeup Latencies percentiles (usec) runtime 30 (s) (307977 total samples) + 50.0th: 2516 (89906 samples) + 90.0th: 4376 (123263 samples) + * 99.0th: 6952 (27186 samples) + 99.9th: 6968 (2832 samples) + min=1, max=11168 +Request Latencies percentiles (usec) runtime 30 (s) (308022 total samples) + 50.0th: 4744 (79370 samples) + 90.0th: 9360 (150205 samples) + * 99.0th: 9392 (1949 samples) + 99.9th: 11696 (1086 samples) + min=2411, max=15703 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10160 (7 samples) + * 50.0th: 10256 
(24 samples) + 90.0th: 10256 (0 samples) + min=10083, max=10258 +average rps: 10267 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/128.txt b/paper_results/schbench/skyloft_fifo/128.txt new file mode 100644 index 0000000..28dd100 --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/128.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (103255 total samples) + 50.0th: 1550 (31006 samples) + 90.0th: 4068 (41303 samples) + * 99.0th: 6888 (9236 samples) + 99.9th: 9296 (991 samples) + min=1, max=12046 +Request Latencies percentiles (usec) runtime 10 (s) (103225 total samples) + 50.0th: 7032 (37585 samples) + 90.0th: 9456 (34676 samples) + * 99.0th: 11728 (9716 samples) + 99.9th: 12528 (476 samples) + min=2410, max=19592 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10256 (10 samples) + * 50.0th: 10256 (0 samples) + 90.0th: 10256 (0 samples) + min=10246, max=10275 +current rps: 10275 +Wakeup Latencies percentiles (usec) runtime 20 (s) (206044 total samples) + 50.0th: 1542 (61873 samples) + 90.0th: 4084 (82540 samples) + * 99.0th: 9232 (18525 samples) + 99.9th: 9264 (1698 samples) + min=1, max=12046 +Request Latencies percentiles (usec) runtime 20 (s) (206091 total samples) + 50.0th: 7032 (76542 samples) + 90.0th: 9488 (68055 samples) + * 99.0th: 11728 (19012 samples) + 99.9th: 14416 (1046 samples) + min=2410, max=23459 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10192 (5 samples) + * 50.0th: 10256 (11 samples) + 90.0th: 10288 (5 samples) + min=10136, max=10278 +current rps: 10136 +Wakeup Latencies percentiles (usec) runtime 30 (s) (309010 total samples) + 50.0th: 1510 (92847 samples) + 90.0th: 4068 (123652 samples) + * 99.0th: 9200 (27620 samples) + 99.9th: 9264 (2837 samples) + min=1, max=12369 +Request Latencies percentiles (usec) runtime 30 (s) (309158 total samples) + 50.0th: 7032 (115946 samples) + 90.0th: 9488 (100666 samples) + * 99.0th: 11728 (29016 samples) + 99.9th: 13968 (1443 samples) + min=2410, max=23459 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10192 (7 samples) + * 50.0th: 10256 (11 samples) + 90.0th: 10288 (13 samples) + min=10101, max=10278 +current rps: 10275 +Wakeup Latencies percentiles (usec) runtime 30 (s) (309088 total samples) + 50.0th: 1510 (92877 samples) + 90.0th: 4068 (123692 samples) + * 99.0th: 9200 (27624 samples) + 99.9th: 9264 (2837 samples) + min=1, max=12369 +Request Latencies percentiles (usec) runtime 30 (s) (309290 total samples) + 50.0th: 7032 (114619 samples) + 90.0th: 9488 (100719 samples) + * 99.0th: 11728 (29028 samples) + 99.9th: 13968 (1444 samples) + min=2410, max=23459 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10192 (7 samples) + * 50.0th: 10256 (11 samples) + 90.0th: 10288 (13 samples) + min=10101, max=10278 +average rps: 10310 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/144.txt b/paper_results/schbench/skyloft_fifo/144.txt new file mode 100644 index 0000000..8c166db --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/144.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (103841 total samples) + 50.0th: 1019 (31195 samples) + 90.0th: 2196 (41618 samples) + * 99.0th: 6904 (9220 samples) + 99.9th: 11600 (998 samples) + min=1, max=12902 +Request Latencies percentiles (usec) runtime 10 (s) (103847 total samples) + 50.0th: 7112 (18827 samples) + 90.0th: 11664 (46881 samples) + * 99.0th: 13936 
(4598 samples) + 99.9th: 14032 (333 samples) + min=2411, max=23502 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10256 (3 samples) + * 50.0th: 10288 (8 samples) + 90.0th: 10288 (0 samples) + min=10256, max=10295 +current rps: 10286 +Wakeup Latencies percentiles (usec) runtime 20 (s) (207709 total samples) + 50.0th: 1013 (62375 samples) + 90.0th: 2196 (83402 samples) + * 99.0th: 6904 (18308 samples) + 99.9th: 11600 (1994 samples) + min=1, max=12902 +Request Latencies percentiles (usec) runtime 20 (s) (207752 total samples) + 50.0th: 7096 (37125 samples) + 90.0th: 11664 (94422 samples) + * 99.0th: 13936 (8640 samples) + 99.9th: 14000 (624 samples) + min=2411, max=23502 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10288 (21 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10288 (0 samples) + min=10256, max=10299 +current rps: 10292 +Wakeup Latencies percentiles (usec) runtime 30 (s) (311520 total samples) + 50.0th: 985 (93612 samples) + 90.0th: 2196 (125298 samples) + * 99.0th: 6904 (27264 samples) + 99.9th: 11600 (2977 samples) + min=1, max=12902 +Request Latencies percentiles (usec) runtime 30 (s) (311630 total samples) + 50.0th: 7096 (55375 samples) + 90.0th: 11664 (141528 samples) + * 99.0th: 13936 (13021 samples) + 99.9th: 14000 (963 samples) + min=2411, max=23502 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10288 (31 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10288 (0 samples) + min=10256, max=10299 +current rps: 10286 +Wakeup Latencies percentiles (usec) runtime 30 (s) (311633 total samples) + 50.0th: 985 (93632 samples) + 90.0th: 2196 (125381 samples) + * 99.0th: 6904 (27264 samples) + 99.9th: 11600 (2978 samples) + min=1, max=12902 +Request Latencies percentiles (usec) runtime 30 (s) (311777 total samples) + 50.0th: 7096 (55399 samples) + 90.0th: 11664 (141599 samples) + * 99.0th: 13936 (13025 samples) + 99.9th: 14000 (963 samples) + min=2411, max=23502 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10288 (31 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10288 (0 samples) + min=10256, max=10299 +average rps: 10393 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/16.txt b/paper_results/schbench/skyloft_fifo/16.txt new file mode 100644 index 0000000..005b7e3 --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/16.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (65562 total samples) + 50.0th: 1 (0 samples) + 90.0th: 1 (0 samples) + * 99.0th: 2 (5934 samples) + 99.9th: 2 (0 samples) + min=1, max=16 +Request Latencies percentiles (usec) runtime 10 (s) (66221 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2412 (0 samples) + * 99.0th: 2428 (3598 samples) + 99.9th: 2500 (36 samples) + min=2411, max=2529 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 6616 (9 samples) + * 50.0th: 6616 (0 samples) + 90.0th: 6632 (2 samples) + min=6612, max=6626 +current rps: 6622 +Wakeup Latencies percentiles (usec) runtime 20 (s) (130829 total samples) + 50.0th: 1 (0 samples) + 90.0th: 1 (0 samples) + * 99.0th: 2 (7412 samples) + 99.9th: 2 (0 samples) + min=1, max=16 +Request Latencies percentiles (usec) runtime 20 (s) (132453 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2412 (0 samples) + * 99.0th: 2420 (7579 samples) + 99.9th: 2428 (1148 samples) + min=2411, max=2529 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 6616 (16 samples) + * 50.0th: 6616 (0 samples) + 
90.0th: 6632 (5 samples) + min=6612, max=6626 +current rps: 6624 +Wakeup Latencies percentiles (usec) runtime 30 (s) (196173 total samples) + 50.0th: 1 (0 samples) + 90.0th: 1 (0 samples) + * 99.0th: 2 (8857 samples) + 99.9th: 2 (0 samples) + min=1, max=16 +Request Latencies percentiles (usec) runtime 30 (s) (198691 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2412 (0 samples) + * 99.0th: 2420 (11896 samples) + 99.9th: 2428 (1148 samples) + min=2411, max=2529 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 6616 (20 samples) + * 50.0th: 6616 (0 samples) + 90.0th: 6632 (11 samples) + min=6612, max=6627 +current rps: 6621 +Wakeup Latencies percentiles (usec) runtime 30 (s) (196174 total samples) + 50.0th: 1 (0 samples) + 90.0th: 1 (0 samples) + * 99.0th: 2 (8857 samples) + 99.9th: 2 (0 samples) + min=1, max=16 +Request Latencies percentiles (usec) runtime 30 (s) (198708 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2412 (0 samples) + * 99.0th: 2420 (11897 samples) + 99.9th: 2428 (1148 samples) + min=2411, max=2529 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 6616 (20 samples) + * 50.0th: 6616 (0 samples) + 90.0th: 6632 (11 samples) + min=6612, max=6627 +average rps: 6624 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/160.txt b/paper_results/schbench/skyloft_fifo/160.txt new file mode 100644 index 0000000..5d53b91 --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/160.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (103867 total samples) + 50.0th: 2716 (31221 samples) + 90.0th: 4376 (41555 samples) + * 99.0th: 4728 (9350 samples) + 99.9th: 11600 (934 samples) + min=1, max=14458 +Request Latencies percentiles (usec) runtime 10 (s) (103844 total samples) + 50.0th: 9264 (20339 samples) + 90.0th: 13968 (47336 samples) + * 99.0th: 14032 (2569 samples) + 99.9th: 16272 (755 samples) + min=2411, max=23492 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10288 (11 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10288 (0 samples) + min=10263, max=10296 +current rps: 10290 +Wakeup Latencies percentiles (usec) runtime 20 (s) (207579 total samples) + 50.0th: 2724 (62594 samples) + 90.0th: 4376 (82973 samples) + * 99.0th: 4712 (18483 samples) + 99.9th: 11600 (1947 samples) + min=1, max=14458 +Request Latencies percentiles (usec) runtime 20 (s) (207584 total samples) + 50.0th: 9232 (38829 samples) + 90.0th: 13968 (96704 samples) + * 99.0th: 14032 (5615 samples) + 99.9th: 16272 (1290 samples) + min=2410, max=23492 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10288 (21 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10288 (0 samples) + min=10263, max=10296 +current rps: 10293 +Wakeup Latencies percentiles (usec) runtime 30 (s) (311479 total samples) + 50.0th: 2724 (93780 samples) + 90.0th: 4376 (125013 samples) + * 99.0th: 4728 (27315 samples) + 99.9th: 11600 (2999 samples) + min=1, max=14458 +Request Latencies percentiles (usec) runtime 30 (s) (311521 total samples) + 50.0th: 9232 (59585 samples) + 90.0th: 13968 (145638 samples) + * 99.0th: 14032 (6889 samples) + 99.9th: 16272 (2254 samples) + min=2410, max=23492 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10288 (31 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10288 (0 samples) + min=10260, max=10298 +current rps: 10298 +Wakeup Latencies percentiles (usec) runtime 30 (s) (311597 total samples) + 50.0th: 2724 (93738 samples) + 90.0th: 4376 
(125073 samples) + * 99.0th: 4728 (27325 samples) + 99.9th: 11600 (2999 samples) + min=1, max=14458 +Request Latencies percentiles (usec) runtime 30 (s) (311686 total samples) + 50.0th: 9232 (59616 samples) + 90.0th: 13968 (145724 samples) + * 99.0th: 14032 (6892 samples) + 99.9th: 16272 (2256 samples) + min=2410, max=23492 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10288 (31 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10288 (0 samples) + min=10260, max=10298 +average rps: 10390 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/176.txt b/paper_results/schbench/skyloft_fifo/176.txt new file mode 100644 index 0000000..d31ae84 --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/176.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (104087 total samples) + 50.0th: 1486 (31325 samples) + 90.0th: 4068 (41563 samples) + * 99.0th: 4728 (9412 samples) + 99.9th: 13872 (900 samples) + min=1, max=14489 +Request Latencies percentiles (usec) runtime 10 (s) (104067 total samples) + 50.0th: 9360 (29862 samples) + 90.0th: 14000 (36582 samples) + * 99.0th: 16304 (8464 samples) + 99.9th: 18592 (540 samples) + min=2410, max=27391 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10288 (10 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10288 (0 samples) + min=10261, max=10310 +current rps: 10299 +Wakeup Latencies percentiles (usec) runtime 20 (s) (208103 total samples) + 50.0th: 1510 (62418 samples) + 90.0th: 4076 (83346 samples) + * 99.0th: 4728 (18585 samples) + 99.9th: 13872 (1909 samples) + min=1, max=14489 +Request Latencies percentiles (usec) runtime 20 (s) (208056 total samples) + 50.0th: 9360 (59670 samples) + 90.0th: 13968 (70050 samples) + * 99.0th: 16304 (19874 samples) + 99.9th: 18592 (843 samples) + min=2410, max=27391 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10288 (17 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10320 (4 samples) + min=10157, max=10311 +current rps: 10304 +Wakeup Latencies percentiles (usec) runtime 30 (s) (312266 total samples) + 50.0th: 1506 (93552 samples) + 90.0th: 4068 (125007 samples) + * 99.0th: 4728 (28122 samples) + 99.9th: 13872 (2780 samples) + min=1, max=15479 +Request Latencies percentiles (usec) runtime 30 (s) (312315 total samples) + 50.0th: 9360 (89523 samples) + 90.0th: 13968 (105554 samples) + * 99.0th: 16304 (29635 samples) + 99.9th: 18592 (1141 samples) + min=2410, max=27391 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10288 (26 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10320 (5 samples) + min=10157, max=10311 +current rps: 10294 +Wakeup Latencies percentiles (usec) runtime 30 (s) (312408 total samples) + 50.0th: 1510 (93870 samples) + 90.0th: 4068 (124773 samples) + * 99.0th: 4728 (28150 samples) + 99.9th: 13872 (2781 samples) + min=1, max=15479 +Request Latencies percentiles (usec) runtime 30 (s) (312498 total samples) + 50.0th: 9360 (89572 samples) + 90.0th: 13968 (105626 samples) + * 99.0th: 16304 (29652 samples) + 99.9th: 18592 (1141 samples) + min=2410, max=27391 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10288 (26 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10320 (5 samples) + min=10157, max=10311 +average rps: 10417 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/192.txt b/paper_results/schbench/skyloft_fifo/192.txt new file mode 100644 index 0000000..0f3d16d --- /dev/null +++ 
b/paper_results/schbench/skyloft_fifo/192.txt @@ -0,0 +1,36 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (104078 total samples) + 50.0th: 1242 (31262 samples) + 90.0th: 2260 (41650 samples) + * 99.0th: 4600 (9308 samples) + 99.9th: 16240 (988 samples) + min=1, max=16898 +Request Latencies percentiles (usec) runtime 10 (s) (103994 total samples) + 50.0th: 9424 (28485 samples) + 90.0th: 16272 (43802 samples) + * 99.0th: 18528 (7693 samples) + 99.9th: 18656 (394 samples) + min=2410, max=31192 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10288 (6 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10320 (5 samples) + min=10275, max=10310 +current rps: 10306 +Wakeup Latencies percentiles (usec) runtime 10 (s) (104172 total samples) + 50.0th: 1242 (31285 samples) + 90.0th: 2260 (41710 samples) + * 99.0th: 4600 (9317 samples) + 99.9th: 16240 (988 samples) + min=1, max=16898 +Request Latencies percentiles (usec) runtime 10 (s) (104191 total samples) + 50.0th: 9424 (28534 samples) + 90.0th: 16272 (43890 samples) + * 99.0th: 18528 (7706 samples) + 99.9th: 18656 (395 samples) + min=2410, max=31192 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10288 (6 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10320 (5 samples) + min=10275, max=10310 +average rps: 10419 diff --git a/paper_results/schbench/skyloft_fifo/208.txt b/paper_results/schbench/skyloft_fifo/208.txt new file mode 100644 index 0000000..7535d79 --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/208.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (104237 total samples) + 50.0th: 2820 (31400 samples) + 90.0th: 4392 (41721 samples) + * 99.0th: 4760 (9281 samples) + 99.9th: 16240 (945 samples) + min=1, max=16889 +Request Latencies percentiles (usec) runtime 10 (s) (104147 total samples) + 50.0th: 11568 (28326 samples) + 90.0th: 16416 (42245 samples) + * 99.0th: 18656 (8885 samples) + 99.9th: 20896 (854 samples) + min=2411, max=31286 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10288 (7 samples) + * 50.0th: 10288 (0 samples) + 90.0th: 10320 (4 samples) + min=10277, max=10308 +current rps: 10297 +Wakeup Latencies percentiles (usec) runtime 20 (s) (208462 total samples) + 50.0th: 2796 (62747 samples) + 90.0th: 4376 (83817 samples) + * 99.0th: 4728 (18311 samples) + 99.9th: 16240 (1851 samples) + min=1, max=18915 +Request Latencies percentiles (usec) runtime 20 (s) (208403 total samples) + 50.0th: 11568 (56146 samples) + 90.0th: 16416 (84460 samples) + * 99.0th: 18656 (18188 samples) + 99.9th: 20896 (1248 samples) + min=2411, max=31286 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10288 (9 samples) + * 50.0th: 10320 (12 samples) + 90.0th: 10320 (0 samples) + min=10277, max=10314 +current rps: 10311 +Wakeup Latencies percentiles (usec) runtime 30 (s) (312558 total samples) + 50.0th: 2796 (94321 samples) + 90.0th: 4376 (125320 samples) + * 99.0th: 4728 (27282 samples) + 99.9th: 16240 (3047 samples) + min=1, max=18915 +Request Latencies percentiles (usec) runtime 30 (s) (312506 total samples) + 50.0th: 11568 (83541 samples) + 90.0th: 16416 (126540 samples) + * 99.0th: 18656 (27308 samples) + 99.9th: 20896 (1924 samples) + min=2411, max=31286 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10288 (13 samples) + * 50.0th: 10320 (18 samples) + 90.0th: 10320 (0 samples) + min=10221, max=10314 +current rps: 10307 +Wakeup Latencies percentiles (usec) runtime 30 (s) (312703 total samples) + 
50.0th: 2796 (94342 samples) + 90.0th: 4376 (125410 samples) + * 99.0th: 4728 (27303 samples) + 99.9th: 16240 (3048 samples) + min=1, max=18915 +Request Latencies percentiles (usec) runtime 30 (s) (312723 total samples) + 50.0th: 11568 (83590 samples) + 90.0th: 16416 (126619 samples) + * 99.0th: 18656 (27341 samples) + 99.9th: 20896 (1927 samples) + min=2411, max=31286 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10288 (13 samples) + * 50.0th: 10320 (18 samples) + 90.0th: 10320 (0 samples) + min=10221, max=10314 +average rps: 10424 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/224.txt b/paper_results/schbench/skyloft_fifo/224.txt new file mode 100644 index 0000000..ca48250 --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/224.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (104437 total samples) + 50.0th: 1750 (31354 samples) + 90.0th: 4060 (41771 samples) + * 99.0th: 4712 (9400 samples) + 99.9th: 18528 (961 samples) + min=1, max=19292 +Request Latencies percentiles (usec) runtime 10 (s) (104330 total samples) + 50.0th: 11664 (34757 samples) + 90.0th: 18592 (40983 samples) + * 99.0th: 20896 (5992 samples) + 99.9th: 23200 (641 samples) + min=2411, max=31284 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10320 (11 samples) + * 50.0th: 10320 (0 samples) + 90.0th: 10320 (0 samples) + min=10271, max=10319 +current rps: 10319 +Wakeup Latencies percentiles (usec) runtime 20 (s) (208084 total samples) + 50.0th: 1750 (62598 samples) + 90.0th: 4084 (83053 samples) + * 99.0th: 6904 (18816 samples) + 99.9th: 18528 (1875 samples) + min=1, max=20586 +Request Latencies percentiles (usec) runtime 20 (s) (208061 total samples) + 50.0th: 11664 (69319 samples) + 90.0th: 18592 (81828 samples) + * 99.0th: 20896 (11753 samples) + 99.9th: 23200 (1560 samples) + min=2411, max=39049 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10256 (6 samples) + * 50.0th: 10320 (15 samples) + 90.0th: 10320 (0 samples) + min=10170, max=10320 +current rps: 10270 +Wakeup Latencies percentiles (usec) runtime 30 (s) (311803 total samples) + 50.0th: 1750 (93531 samples) + 90.0th: 4092 (124903 samples) + * 99.0th: 7272 (27779 samples) + 99.9th: 18528 (2899 samples) + min=1, max=20586 +Request Latencies percentiles (usec) runtime 30 (s) (311787 total samples) + 50.0th: 11664 (103900 samples) + 90.0th: 18592 (122811 samples) + * 99.0th: 20896 (17056 samples) + 99.9th: 23392 (2274 samples) + min=2410, max=39049 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10256 (9 samples) + * 50.0th: 10320 (22 samples) + 90.0th: 10320 (0 samples) + min=10137, max=10321 +current rps: 10137 +Wakeup Latencies percentiles (usec) runtime 30 (s) (311964 total samples) + 50.0th: 1750 (93575 samples) + 90.0th: 4092 (124988 samples) + * 99.0th: 7320 (27808 samples) + 99.9th: 18528 (2900 samples) + min=1, max=20586 +Request Latencies percentiles (usec) runtime 30 (s) (312019 total samples) + 50.0th: 11664 (103971 samples) + 90.0th: 18592 (122908 samples) + * 99.0th: 20896 (17071 samples) + 99.9th: 23392 (2275 samples) + min=2410, max=39049 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10256 (9 samples) + * 50.0th: 10320 (22 samples) + 90.0th: 10320 (0 samples) + min=10137, max=10321 +average rps: 10401 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/24.txt b/paper_results/schbench/skyloft_fifo/24.txt new file mode 
100644 index 0000000..3984fab --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/24.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97552 total samples) + 50.0th: 9 (30993 samples) + 90.0th: 13 (35888 samples) + * 99.0th: 16 (6919 samples) + 99.9th: 17 (187 samples) + min=1, max=21 +Request Latencies percentiles (usec) runtime 10 (s) (98015 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (9093 samples) + * 99.0th: 2500 (1759 samples) + 99.9th: 2540 (877 samples) + min=2411, max=2950 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9776 (5 samples) + * 50.0th: 9808 (4 samples) + 90.0th: 9840 (2 samples) + min=9759, max=9825 +current rps: 9795 +Wakeup Latencies percentiles (usec) runtime 20 (s) (195184 total samples) + 50.0th: 9 (62737 samples) + 90.0th: 13 (74764 samples) + * 99.0th: 16 (15126 samples) + 99.9th: 17 (267 samples) + min=1, max=343 +Request Latencies percentiles (usec) runtime 20 (s) (195938 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (18172 samples) + * 99.0th: 2500 (2981 samples) + 99.9th: 2540 (1783 samples) + min=2411, max=4002 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9776 (10 samples) + * 50.0th: 9808 (9 samples) + 90.0th: 9808 (0 samples) + min=9753, max=9825 +current rps: 9799 +Wakeup Latencies percentiles (usec) runtime 30 (s) (292987 total samples) + 50.0th: 9 (92558 samples) + 90.0th: 13 (113826 samples) + * 99.0th: 16 (25337 samples) + 99.9th: 17 (445 samples) + min=1, max=343 +Request Latencies percentiles (usec) runtime 30 (s) (293957 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (26854 samples) + * 99.0th: 2500 (4237 samples) + 99.9th: 2540 (2619 samples) + min=2411, max=4002 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9776 (12 samples) + * 50.0th: 9808 (17 samples) + 90.0th: 9808 (0 samples) + min=9753, max=9825 +current rps: 9797 +Wakeup Latencies percentiles (usec) runtime 30 (s) (292988 total samples) + 50.0th: 9 (92558 samples) + 90.0th: 13 (113826 samples) + * 99.0th: 16 (25337 samples) + 99.9th: 17 (445 samples) + min=1, max=343 +Request Latencies percentiles (usec) runtime 30 (s) (293981 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (26856 samples) + * 99.0th: 2500 (4238 samples) + 99.9th: 2540 (2619 samples) + min=2411, max=4002 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9776 (12 samples) + * 50.0th: 9808 (17 samples) + 90.0th: 9808 (0 samples) + min=9753, max=9825 +average rps: 9799 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/240.txt b/paper_results/schbench/skyloft_fifo/240.txt new file mode 100644 index 0000000..3120ced --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/240.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (103865 total samples) + 50.0th: 3220 (30689 samples) + 90.0th: 4408 (41667 samples) + * 99.0th: 18528 (10030 samples) + 99.9th: 18592 (59 samples) + min=1, max=19959 +Request Latencies percentiles (usec) runtime 10 (s) (103681 total samples) + 50.0th: 11696 (30906 samples) + 90.0th: 18592 (41169 samples) + * 99.0th: 20960 (9413 samples) + 99.9th: 23136 (545 samples) + min=2412, max=35113 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10288 (3 samples) + * 50.0th: 10320 (8 samples) + 90.0th: 10320 (0 samples) + min=10283, max=10324 +current rps: 10319 +Wakeup Latencies percentiles (usec) runtime 20 (s) (207462 total samples) + 50.0th: 3212 (62053 
samples) + 90.0th: 4392 (83065 samples) + * 99.0th: 18528 (19963 samples) + 99.9th: 18592 (159 samples) + min=1, max=21240 +Request Latencies percentiles (usec) runtime 20 (s) (207303 total samples) + 50.0th: 11696 (63487 samples) + 90.0th: 18592 (81915 samples) + * 99.0th: 20960 (18874 samples) + 99.9th: 23136 (995 samples) + min=2410, max=35113 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10320 (21 samples) + * 50.0th: 10320 (0 samples) + 90.0th: 10320 (0 samples) + min=10283, max=10324 +current rps: 10296 +Wakeup Latencies percentiles (usec) runtime 30 (s) (311113 total samples) + 50.0th: 3188 (93118 samples) + 90.0th: 4376 (124741 samples) + * 99.0th: 18528 (30202 samples) + 99.9th: 18592 (214 samples) + min=1, max=30545 +Request Latencies percentiles (usec) runtime 30 (s) (310990 total samples) + 50.0th: 11696 (95215 samples) + 90.0th: 18592 (122890 samples) + * 99.0th: 20896 (26588 samples) + 99.9th: 23136 (2853 samples) + min=2410, max=35113 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10320 (31 samples) + * 50.0th: 10320 (0 samples) + 90.0th: 10320 (0 samples) + min=10283, max=10324 +current rps: 10292 +Wakeup Latencies percentiles (usec) runtime 30 (s) (311236 total samples) + 50.0th: 3188 (93134 samples) + 90.0th: 4376 (124815 samples) + * 99.0th: 18528 (30212 samples) + 99.9th: 18592 (214 samples) + min=1, max=30545 +Request Latencies percentiles (usec) runtime 30 (s) (311237 total samples) + 50.0th: 11696 (95290 samples) + 90.0th: 18592 (122992 samples) + * 99.0th: 20896 (26610 samples) + 99.9th: 23136 (2855 samples) + min=2410, max=35113 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10320 (31 samples) + * 50.0th: 10320 (0 samples) + 90.0th: 10320 (0 samples) + min=10283, max=10324 +average rps: 10375 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/256.txt b/paper_results/schbench/skyloft_fifo/256.txt new file mode 100644 index 0000000..14c4ea0 --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/256.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (103974 total samples) + 50.0th: 4760 (31473 samples) + 90.0th: 6696 (41424 samples) + * 99.0th: 19168 (9240 samples) + 99.9th: 20768 (935 samples) + min=1, max=24420 +Request Latencies percentiles (usec) runtime 10 (s) (103853 total samples) + 50.0th: 11760 (31200 samples) + 90.0th: 20896 (42097 samples) + * 99.0th: 23328 (8679 samples) + 99.9th: 25568 (905 samples) + min=2410, max=42959 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10288 (4 samples) + * 50.0th: 10320 (7 samples) + 90.0th: 10320 (0 samples) + min=10277, max=10320 +current rps: 10320 +Wakeup Latencies percentiles (usec) runtime 20 (s) (207752 total samples) + 50.0th: 4840 (62292 samples) + 90.0th: 6680 (83386 samples) + * 99.0th: 18528 (18367 samples) + 99.9th: 20576 (1891 samples) + min=1, max=24420 +Request Latencies percentiles (usec) runtime 20 (s) (207694 total samples) + 50.0th: 11760 (62411 samples) + 90.0th: 20896 (84309 samples) + * 99.0th: 23328 (17840 samples) + 99.9th: 25568 (1483 samples) + min=2410, max=42959 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10320 (21 samples) + * 50.0th: 10320 (0 samples) + 90.0th: 10320 (0 samples) + min=10277, max=10321 +current rps: 10313 +Wakeup Latencies percentiles (usec) runtime 30 (s) (311532 total samples) + 50.0th: 4872 (93939 samples) + 90.0th: 6696 (124609 samples) + * 99.0th: 18528 (28238 samples) + 99.9th: 
20512 (2061 samples) + min=1, max=24978 +Request Latencies percentiles (usec) runtime 30 (s) (311496 total samples) + 50.0th: 11760 (93017 samples) + 90.0th: 20896 (126570 samples) + * 99.0th: 23328 (26937 samples) + 99.9th: 25568 (2047 samples) + min=2410, max=42959 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10320 (31 samples) + * 50.0th: 10320 (0 samples) + 90.0th: 10320 (0 samples) + min=10274, max=10323 +current rps: 10274 +Wakeup Latencies percentiles (usec) runtime 30 (s) (311745 total samples) + 50.0th: 4872 (94124 samples) + 90.0th: 6696 (124633 samples) + * 99.0th: 18528 (28246 samples) + 99.9th: 20512 (2081 samples) + min=1, max=24978 +Request Latencies percentiles (usec) runtime 30 (s) (311753 total samples) + 50.0th: 11760 (93090 samples) + 90.0th: 20896 (126663 samples) + * 99.0th: 23328 (26976 samples) + 99.9th: 25568 (2049 samples) + min=2410, max=42959 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10320 (31 samples) + * 50.0th: 10320 (0 samples) + 90.0th: 10320 (0 samples) + min=10274, max=10323 +average rps: 10392 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/32.txt b/paper_results/schbench/skyloft_fifo/32.txt new file mode 100644 index 0000000..defe4ec --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/32.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97441 total samples) + 50.0th: 7 (29829 samples) + 90.0th: 1718 (35660 samples) + * 99.0th: 2276 (8745 samples) + 99.9th: 3620 (862 samples) + min=1, max=3867 +Request Latencies percentiles (usec) runtime 10 (s) (97685 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (33816 samples) + * 99.0th: 3996 (3887 samples) + 99.9th: 4728 (416 samples) + min=2411, max=7898 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9648 (3 samples) + * 50.0th: 9776 (3 samples) + 90.0th: 9808 (4 samples) + min=9656, max=9831 +current rps: 9820 +Wakeup Latencies percentiles (usec) runtime 20 (s) (195281 total samples) + 50.0th: 8 (53131 samples) + 90.0th: 1682 (66113 samples) + * 99.0th: 2268 (17768 samples) + 99.9th: 3516 (1544 samples) + min=1, max=3867 +Request Latencies percentiles (usec) runtime 20 (s) (195565 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (59586 samples) + * 99.0th: 2548 (6936 samples) + 99.9th: 4728 (1787 samples) + min=2411, max=7898 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9712 (5 samples) + * 50.0th: 9808 (15 samples) + 90.0th: 9808 (0 samples) + min=9644, max=9831 +current rps: 9802 +Wakeup Latencies percentiles (usec) runtime 30 (s) (293340 total samples) + 50.0th: 8 (100369 samples) + 90.0th: 1670 (99991 samples) + * 99.0th: 2260 (26624 samples) + 99.9th: 3340 (2315 samples) + min=1, max=3867 +Request Latencies percentiles (usec) runtime 30 (s) (293676 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (88055 samples) + * 99.0th: 2540 (9128 samples) + 99.9th: 4728 (2358 samples) + min=2411, max=7904 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9776 (12 samples) + * 50.0th: 9808 (16 samples) + 90.0th: 9808 (0 samples) + min=9644, max=9831 +current rps: 9827 +Wakeup Latencies percentiles (usec) runtime 30 (s) (293349 total samples) + 50.0th: 8 (100369 samples) + 90.0th: 1670 (99998 samples) + * 99.0th: 2260 (26625 samples) + 99.9th: 3340 (2315 samples) + min=1, max=3867 +Request Latencies percentiles (usec) runtime 30 (s) (293708 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 
(88064 samples) + * 99.0th: 2540 (9129 samples) + 99.9th: 4728 (2358 samples) + min=2411, max=7904 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9776 (12 samples) + * 50.0th: 9808 (16 samples) + 90.0th: 9808 (0 samples) + min=9644, max=9831 +average rps: 9790 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/4.txt b/paper_results/schbench/skyloft_fifo/4.txt new file mode 100644 index 0000000..a216986 --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/4.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (16286 total samples) + 50.0th: 1 (0 samples) + 90.0th: 1 (0 samples) + * 99.0th: 2 (926 samples) + 99.9th: 2 (0 samples) + min=1, max=3 +Request Latencies percentiles (usec) runtime 10 (s) (16552 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (4149 samples) + * 99.0th: 2420 (0 samples) + 99.9th: 2428 (92 samples) + min=2412, max=2934 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 1654 (6 samples) + * 50.0th: 1654 (0 samples) + 90.0th: 1658 (5 samples) + min=1654, max=1657 +current rps: 1655 +Wakeup Latencies percentiles (usec) runtime 20 (s) (32830 total samples) + 50.0th: 1 (0 samples) + 90.0th: 2 (3841 samples) + * 99.0th: 2 (0 samples) + 99.9th: 2 (0 samples) + min=1, max=3 +Request Latencies percentiles (usec) runtime 20 (s) (33103 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (8296 samples) + * 99.0th: 2420 (0 samples) + 99.9th: 2428 (94 samples) + min=2412, max=2934 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 1654 (13 samples) + * 50.0th: 1654 (0 samples) + 90.0th: 1658 (8 samples) + min=1654, max=1657 +current rps: 1655 +Wakeup Latencies percentiles (usec) runtime 30 (s) (49376 total samples) + 50.0th: 1 (0 samples) + 90.0th: 2 (6434 samples) + * 99.0th: 2 (0 samples) + 99.9th: 2 (0 samples) + min=1, max=3 +Request Latencies percentiles (usec) runtime 30 (s) (49657 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (12429 samples) + * 99.0th: 2420 (0 samples) + 99.9th: 2428 (94 samples) + min=2412, max=2934 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 1654 (20 samples) + * 50.0th: 1654 (0 samples) + 90.0th: 1658 (11 samples) + min=1654, max=1657 +current rps: 1656 +Wakeup Latencies percentiles (usec) runtime 30 (s) (49376 total samples) + 50.0th: 1 (0 samples) + 90.0th: 2 (6434 samples) + * 99.0th: 2 (0 samples) + 99.9th: 2 (0 samples) + min=1, max=3 +Request Latencies percentiles (usec) runtime 30 (s) (49661 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (12430 samples) + * 99.0th: 2420 (0 samples) + 99.9th: 2428 (94 samples) + min=2412, max=2934 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 1654 (20 samples) + * 50.0th: 1654 (0 samples) + 90.0th: 1658 (11 samples) + min=1654, max=1657 +average rps: 1655 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/40.txt b/paper_results/schbench/skyloft_fifo/40.txt new file mode 100644 index 0000000..43841af --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/40.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (97190 total samples) + 50.0th: 733 (22961 samples) + 90.0th: 2108 (38903 samples) + * 99.0th: 2332 (8843 samples) + 99.9th: 3724 (723 samples) + min=1, max=3901 +Request Latencies percentiles (usec) runtime 10 (s) (97647 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (24803 samples) + * 99.0th: 4004 (7659 samples) + 
99.9th: 4728 (616 samples) + min=2411, max=7903 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9680 (5 samples) + * 50.0th: 9744 (1 samples) + 90.0th: 9840 (4 samples) + min=9659, max=9859 +current rps: 9675 +Wakeup Latencies percentiles (usec) runtime 20 (s) (194432 total samples) + 50.0th: 711 (46210 samples) + 90.0th: 2076 (78115 samples) + * 99.0th: 2324 (17453 samples) + 99.9th: 3684 (1393 samples) + min=1, max=3901 +Request Latencies percentiles (usec) runtime 20 (s) (195445 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (49350 samples) + * 99.0th: 3996 (12537 samples) + 99.9th: 4728 (1450 samples) + min=2411, max=7903 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 9680 (8 samples) + * 50.0th: 9808 (8 samples) + 90.0th: 9840 (4 samples) + min=9652, max=9859 +current rps: 9815 +Wakeup Latencies percentiles (usec) runtime 30 (s) (292507 total samples) + 50.0th: 695 (73549 samples) + 90.0th: 2042 (117041 samples) + * 99.0th: 2316 (27073 samples) + 99.9th: 3580 (1683 samples) + min=1, max=3901 +Request Latencies percentiles (usec) runtime 30 (s) (293752 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (67506 samples) + * 99.0th: 3996 (14559 samples) + 99.9th: 4728 (2059 samples) + min=2411, max=7903 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (8 samples) + * 50.0th: 9808 (12 samples) + 90.0th: 9840 (10 samples) + min=9652, max=9859 +current rps: 9828 +Wakeup Latencies percentiles (usec) runtime 30 (s) (292524 total samples) + 50.0th: 695 (73553 samples) + 90.0th: 2042 (117052 samples) + * 99.0th: 2316 (27075 samples) + 99.9th: 3580 (1683 samples) + min=1, max=3901 +Request Latencies percentiles (usec) runtime 30 (s) (293792 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2420 (67512 samples) + * 99.0th: 3996 (14560 samples) + 99.9th: 4728 (2059 samples) + min=2411, max=7903 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 9680 (8 samples) + * 50.0th: 9808 (12 samples) + 90.0th: 9840 (10 samples) + min=9652, max=9859 +average rps: 9793 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/48.txt b/paper_results/schbench/skyloft_fifo/48.txt new file mode 100644 index 0000000..a92b4c5 --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/48.txt @@ -0,0 +1,36 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (100628 total samples) + 50.0th: 17 (28517 samples) + 90.0th: 2308 (41850 samples) + * 99.0th: 2324 (7932 samples) + 99.9th: 3156 (356 samples) + min=1, max=3882 +Request Latencies percentiles (usec) runtime 10 (s) (100986 total samples) + 50.0th: 2420 (11143 samples) + 90.0th: 4728 (45586 samples) + * 99.0th: 4744 (721 samples) + 99.9th: 4920 (297 samples) + min=2411, max=7900 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10032 (3 samples) + * 50.0th: 10096 (7 samples) + 90.0th: 10096 (0 samples) + min=9955, max=10129 +current rps: 10104 +Wakeup Latencies percentiles (usec) runtime 10 (s) (100668 total samples) + 50.0th: 17 (28526 samples) + 90.0th: 2308 (41864 samples) + * 99.0th: 2324 (7936 samples) + 99.9th: 3156 (356 samples) + min=1, max=3882 +Request Latencies percentiles (usec) runtime 10 (s) (101050 total samples) + 50.0th: 2420 (11149 samples) + 90.0th: 4728 (45623 samples) + * 99.0th: 4744 (721 samples) + 99.9th: 4920 (297 samples) + min=2411, max=7900 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10032 (3 samples) + * 50.0th: 10096 (7 samples) + 90.0th: 
10096 (0 samples) + min=9955, max=10129 +average rps: 10105 diff --git a/paper_results/schbench/skyloft_fifo/64.txt b/paper_results/schbench/skyloft_fifo/64.txt new file mode 100644 index 0000000..754245c --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/64.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (102690 total samples) + 50.0th: 1182 (30894 samples) + 90.0th: 2284 (41172 samples) + * 99.0th: 2324 (9270 samples) + 99.9th: 4632 (767 samples) + min=1, max=7739 +Request Latencies percentiles (usec) runtime 10 (s) (102722 total samples) + 50.0th: 4632 (0 samples) + 90.0th: 4680 (8779 samples) + * 99.0th: 4728 (7938 samples) + 99.9th: 7800 (978 samples) + min=2412, max=11711 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10224 (3 samples) + * 50.0th: 10288 (8 samples) + 90.0th: 10288 (0 samples) + min=10136, max=10279 +current rps: 10275 +Wakeup Latencies percentiles (usec) runtime 20 (s) (204560 total samples) + 50.0th: 1210 (61402 samples) + 90.0th: 2292 (83226 samples) + * 99.0th: 2436 (16951 samples) + 99.9th: 4632 (1997 samples) + min=1, max=7790 +Request Latencies percentiles (usec) runtime 20 (s) (204631 total samples) + 50.0th: 4632 (0 samples) + 90.0th: 4680 (14911 samples) + * 99.0th: 6952 (16199 samples) + 99.9th: 7800 (1982 samples) + min=2411, max=11711 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10096 (6 samples) + * 50.0th: 10256 (6 samples) + 90.0th: 10288 (9 samples) + min=10095, max=10280 +current rps: 10095 +Wakeup Latencies percentiles (usec) runtime 30 (s) (307307 total samples) + 50.0th: 1206 (92372 samples) + 90.0th: 2292 (125430 samples) + * 99.0th: 2324 (25010 samples) + 99.9th: 4632 (2971 samples) + min=1, max=7790 +Request Latencies percentiles (usec) runtime 30 (s) (307418 total samples) + 50.0th: 4632 (0 samples) + 90.0th: 4680 (21279 samples) + * 99.0th: 4776 (23456 samples) + 99.9th: 7800 (3015 samples) + min=2411, max=11711 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10128 (8 samples) + * 50.0th: 10288 (23 samples) + 90.0th: 10288 (0 samples) + min=10095, max=10284 +current rps: 10278 +Wakeup Latencies percentiles (usec) runtime 30 (s) (307328 total samples) + 50.0th: 1206 (92385 samples) + 90.0th: 2292 (125434 samples) + * 99.0th: 2324 (25012 samples) + 99.9th: 4632 (2971 samples) + min=1, max=7790 +Request Latencies percentiles (usec) runtime 30 (s) (307484 total samples) + 50.0th: 4632 (0 samples) + 90.0th: 4680 (21282 samples) + * 99.0th: 4776 (23460 samples) + 99.9th: 7800 (3015 samples) + min=2411, max=11711 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10128 (8 samples) + * 50.0th: 10288 (23 samples) + 90.0th: 10288 (0 samples) + min=10095, max=10284 +average rps: 10249 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/72.txt b/paper_results/schbench/skyloft_fifo/72.txt new file mode 100644 index 0000000..30b4abb --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/72.txt @@ -0,0 +1,36 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101825 total samples) + 50.0th: 1026 (30516 samples) + 90.0th: 4216 (40743 samples) + * 99.0th: 4632 (9990 samples) + 99.9th: 4648 (51 samples) + min=1, max=6016 +Request Latencies percentiles (usec) runtime 10 (s) (101917 total samples) + 50.0th: 4632 (25201 samples) + 90.0th: 6984 (39595 samples) + * 99.0th: 7048 (8309 samples) + 99.9th: 7192 (620 samples) + min=2411, max=11235 +RPS percentiles (requests) runtime 10 (s) (11 total 
samples) + 20.0th: 10160 (10 samples) + * 50.0th: 10160 (0 samples) + 90.0th: 10160 (0 samples) + min=10139, max=10178 +current rps: 10168 +Wakeup Latencies percentiles (usec) runtime 10 (s) (101859 total samples) + 50.0th: 1026 (30521 samples) + 90.0th: 4216 (40764 samples) + * 99.0th: 4632 (9996 samples) + 99.9th: 4648 (51 samples) + min=1, max=6016 +Request Latencies percentiles (usec) runtime 10 (s) (101998 total samples) + 50.0th: 4632 (25225 samples) + 90.0th: 6984 (39623 samples) + * 99.0th: 7048 (8314 samples) + 99.9th: 7192 (621 samples) + min=2411, max=11235 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10160 (10 samples) + * 50.0th: 10160 (0 samples) + 90.0th: 10160 (0 samples) + min=10139, max=10178 +average rps: 10200 diff --git a/paper_results/schbench/skyloft_fifo/8.txt b/paper_results/schbench/skyloft_fifo/8.txt new file mode 100644 index 0000000..8cfc373 --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/8.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (30158 total samples) + 50.0th: 1 (0 samples) + 90.0th: 1 (0 samples) + * 99.0th: 2 (693 samples) + 99.9th: 2 (0 samples) + min=1, max=5 +Request Latencies percentiles (usec) runtime 10 (s) (32794 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2532 (10030 samples) + * 99.0th: 2540 (2437 samples) + 99.9th: 2540 (0 samples) + min=2410, max=2614 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 3276 (9 samples) + * 50.0th: 3276 (0 samples) + 90.0th: 3284 (1 samples) + min=3272, max=3308 +current rps: 3276 +Wakeup Latencies percentiles (usec) runtime 20 (s) (59812 total samples) + 50.0th: 1 (0 samples) + 90.0th: 1 (0 samples) + * 99.0th: 2 (754 samples) + 99.9th: 2 (0 samples) + min=1, max=5 +Request Latencies percentiles (usec) runtime 20 (s) (65562 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2532 (19332 samples) + * 99.0th: 2540 (4873 samples) + 99.9th: 2540 (0 samples) + min=2410, max=2877 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 3276 (19 samples) + * 50.0th: 3276 (0 samples) + 90.0th: 3276 (0 samples) + min=3272, max=3308 +current rps: 3278 +Wakeup Latencies percentiles (usec) runtime 30 (s) (89513 total samples) + 50.0th: 1 (0 samples) + 90.0th: 1 (0 samples) + * 99.0th: 1 (0 samples) + 99.9th: 2 (798 samples) + min=1, max=5 +Request Latencies percentiles (usec) runtime 30 (s) (98329 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2532 (29015 samples) + * 99.0th: 2540 (7325 samples) + 99.9th: 2540 (0 samples) + min=2410, max=2877 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 3276 (29 samples) + * 50.0th: 3276 (0 samples) + 90.0th: 3276 (0 samples) + min=3272, max=3308 +current rps: 3275 +Wakeup Latencies percentiles (usec) runtime 30 (s) (89514 total samples) + 50.0th: 1 (0 samples) + 90.0th: 1 (0 samples) + * 99.0th: 1 (0 samples) + 99.9th: 2 (798 samples) + min=1, max=5 +Request Latencies percentiles (usec) runtime 30 (s) (98338 total samples) + 50.0th: 2412 (0 samples) + 90.0th: 2532 (29018 samples) + * 99.0th: 2540 (7326 samples) + 99.9th: 2540 (0 samples) + min=2410, max=2877 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 3276 (29 samples) + * 50.0th: 3276 (0 samples) + 90.0th: 3276 (0 samples) + min=3272, max=3308 +average rps: 3278 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/80.txt b/paper_results/schbench/skyloft_fifo/80.txt new file mode 100644 index 0000000..b56c543 --- /dev/null +++ 
b/paper_results/schbench/skyloft_fifo/80.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (101547 total samples) + 50.0th: 2156 (30147 samples) + 90.0th: 4568 (42214 samples) + * 99.0th: 4632 (8133 samples) + 99.9th: 4696 (307 samples) + min=1, max=7248 +Request Latencies percentiles (usec) runtime 10 (s) (101585 total samples) + 50.0th: 4712 (23588 samples) + 90.0th: 7032 (43372 samples) + * 99.0th: 7080 (5799 samples) + 99.9th: 7896 (884 samples) + min=2411, max=11797 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10032 (4 samples) + * 50.0th: 10192 (7 samples) + 90.0th: 10192 (0 samples) + min=9980, max=10207 +current rps: 10036 +Wakeup Latencies percentiles (usec) runtime 20 (s) (203379 total samples) + 50.0th: 2148 (59911 samples) + 90.0th: 4568 (85186 samples) + * 99.0th: 4632 (15697 samples) + 99.9th: 4680 (538 samples) + min=1, max=7248 +Request Latencies percentiles (usec) runtime 20 (s) (203456 total samples) + 50.0th: 4712 (48073 samples) + 90.0th: 7032 (87806 samples) + * 99.0th: 7080 (10106 samples) + 99.9th: 7896 (1398 samples) + min=2411, max=11809 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10032 (5 samples) + * 50.0th: 10192 (13 samples) + 90.0th: 10224 (3 samples) + min=9980, max=10218 +current rps: 10218 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305593 total samples) + 50.0th: 2148 (89686 samples) + 90.0th: 4568 (128517 samples) + * 99.0th: 4632 (22983 samples) + 99.9th: 4664 (682 samples) + min=1, max=7248 +Request Latencies percentiles (usec) runtime 30 (s) (305709 total samples) + 50.0th: 4712 (71975 samples) + 90.0th: 7032 (133112 samples) + * 99.0th: 7064 (13729 samples) + 99.9th: 7896 (1998 samples) + min=2411, max=11809 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10128 (7 samples) + * 50.0th: 10192 (16 samples) + 90.0th: 10224 (8 samples) + min=9980, max=10220 +current rps: 10220 +Wakeup Latencies percentiles (usec) runtime 30 (s) (305621 total samples) + 50.0th: 2148 (89692 samples) + 90.0th: 4568 (128539 samples) + * 99.0th: 4632 (22983 samples) + 99.9th: 4664 (682 samples) + min=1, max=7248 +Request Latencies percentiles (usec) runtime 30 (s) (305790 total samples) + 50.0th: 4712 (71992 samples) + 90.0th: 7032 (133150 samples) + * 99.0th: 7064 (13736 samples) + 99.9th: 7896 (1998 samples) + min=2411, max=11809 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10128 (7 samples) + * 50.0th: 10192 (16 samples) + 90.0th: 10224 (8 samples) + min=9980, max=10220 +average rps: 10193 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/96.txt b/paper_results/schbench/skyloft_fifo/96.txt new file mode 100644 index 0000000..6c9047f --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/96.txt @@ -0,0 +1,73 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (102769 total samples) + 50.0th: 324 (29617 samples) + 90.0th: 2140 (41118 samples) + * 99.0th: 6936 (9637 samples) + 99.9th: 6952 (586 samples) + min=1, max=8676 +Request Latencies percentiles (usec) runtime 10 (s) (102767 total samples) + 50.0th: 4728 (31585 samples) + 90.0th: 9328 (45738 samples) + * 99.0th: 9360 (1179 samples) + 99.9th: 9392 (41 samples) + min=2411, max=15675 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 10256 (11 samples) + * 50.0th: 10256 (0 samples) + 90.0th: 10256 (0 samples) + min=10218, max=10270 +current rps: 10270 +Wakeup Latencies percentiles (usec) runtime 20 (s) (205397 total 
samples) + 50.0th: 271 (57634 samples) + 90.0th: 2068 (82288 samples) + * 99.0th: 6936 (19072 samples) + 99.9th: 6952 (1201 samples) + min=1, max=8676 +Request Latencies percentiles (usec) runtime 20 (s) (205375 total samples) + 50.0th: 4728 (62981 samples) + 90.0th: 9328 (90940 samples) + * 99.0th: 9360 (2393 samples) + 99.9th: 9392 (78 samples) + min=2411, max=15675 +RPS percentiles (requests) runtime 20 (s) (21 total samples) + 20.0th: 10256 (21 samples) + * 50.0th: 10256 (0 samples) + 90.0th: 10256 (0 samples) + min=10143, max=10270 +current rps: 10243 +Wakeup Latencies percentiles (usec) runtime 30 (s) (308071 total samples) + 50.0th: 69 (87672 samples) + 90.0th: 1962 (123326 samples) + * 99.0th: 6936 (29369 samples) + 99.9th: 6952 (1254 samples) + min=1, max=8676 +Request Latencies percentiles (usec) runtime 30 (s) (308205 total samples) + 50.0th: 4728 (92281 samples) + 90.0th: 9328 (134640 samples) + * 99.0th: 9360 (7069 samples) + 99.9th: 9392 (112 samples) + min=2410, max=15690 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10256 (31 samples) + * 50.0th: 10256 (0 samples) + 90.0th: 10256 (0 samples) + min=10143, max=10270 +current rps: 10264 +Wakeup Latencies percentiles (usec) runtime 30 (s) (308141 total samples) + 50.0th: 69 (87701 samples) + 90.0th: 1962 (123346 samples) + * 99.0th: 6936 (29372 samples) + 99.9th: 6952 (1254 samples) + min=1, max=8676 +Request Latencies percentiles (usec) runtime 30 (s) (308302 total samples) + 50.0th: 4728 (92309 samples) + 90.0th: 9328 (134681 samples) + * 99.0th: 9360 (7074 samples) + 99.9th: 9392 (112 samples) + min=2410, max=15690 +RPS percentiles (requests) runtime 30 (s) (31 total samples) + 20.0th: 10256 (31 samples) + * 50.0th: 10256 (0 samples) + 90.0th: 10256 (0 samples) + min=10143, max=10270 +average rps: 10277 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_fifo/all.csv b/paper_results/schbench/skyloft_fifo/all.csv new file mode 100644 index 0000000..2e90c4b --- /dev/null +++ b/paper_results/schbench/skyloft_fifo/all.csv @@ -0,0 +1,22 @@ +cores,wake99,rps50,lat99 +4,2,1654,2420 +8,1,3276,2540 +16,2,6616,2420 +24,16,9808,2500 +32,2260,9808,2540 +40,2316,9808,3996 +48,2324,10096,4744 +64,2324,10288,4776 +72,4632,10160,7800 +80,4632,10192,7064 +96,6936,10256,9360 +112,6952,10256,9392 +128,9200,10256,11728 +144,6904,10288,13936 +160,4728,10288,14032 +176,4728,10288,16304 +192,4600,10288,18528 +208,4728,10320,18656 +224,7320,10320,20896 +240,18528,10320,20896 +256,18528,10320,23328 diff --git a/paper_results/schbench/skyloft_rr1ms/112.txt b/paper_results/schbench/skyloft_rr1ms/112.txt new file mode 100644 index 0000000000000000000000000000000000000000..3dcbfc993bd0e1ecbccb49b6277a6631e79a1ef2 GIT binary patch literal 5520 zcmdT|O>dkq5bZVmiaC<1syxPgI8y#W4^?`|5h2#?qAaknjW+GCuL;=L)izQpRWiOT zUcWW(&AjojJT^~aI>VPni=pj=g0qmV7<6|s*Xg80dkbsk((UCuLcJ9(8j9 z)%0hByiINZa`Ktz;~gL=C@dqzoYEpd@fAfWB^E_V#?V831kRVeGAx)yv7DA32xbtU zZCfg+@kMFQ6lEtq`D3bh& zu+6aLCX5tW<%u&cbB4{Gtr_8%dnn(qHhqwClKZaq3|n!x6*n~fDhdpX^nHf)Bxgw9 zkX&)*>MAqA4vWT0Mr2k^%1Ca0vN7ExV_7&ia$+LAfi}sMFt!Fct$0`=|7ck!7Ox1s zX4;VcmVH`TW}zir7(CPZdt5PXMncohey@^VDAoKBwLVJG928)38Re6Z+ZQI$qd9pS z-Hn4;CrRIE*}c@N`z4^v=HvClTzjXnZBDg1Hx~n2hPrgRJ=S`hr@B#3s#bbDiD6xT z{pb!S;V$a-XFDJ7Zd{hC0$48Gx*EUhPCl#p^|k=(+^xF}rf$&EkhCYdU5IXOIHD!D ze#Xero(`63j2tsdF5)=aGtN?tF^;(<7jYcznV;oCMF5)=a#|%p~#yE0IF5Wnr z&L58G=E)82pwE~`#BjtI#~t(;+(Dl)|5w5hW3G45XK)9726xbBa0h+HzN&DA6JgEJ 
s9rPKxgFZw5W1pGdo{p2g1AP?Gk3*+NDGtz%ecucRI86Qdbt*;j8_dkq5bZVmiaC<1s>UC%cBK4+9;)<^BSNg(MZ3VpHrlkmzGlG&QW7bVDuFLx z{MLSN=GhbHF+ZyI4Bv9A>Y`Kz&Po@mw&lsa$E{JtE$G(uD+LL}<`&|FAQfO6o1ehs z)!89$qZ@z(zoI?d0pU_W-(r$5Br}v8P*e)^D2hB06VzvrvY(41p&rE(E+-)9K(bqq zizFREb4*p%_v8yy`SX2(L_`njsa3|lPcX`qzaOKlCn=f*kF36-_0HO=S zwZ(KsK4czUq!tj4d6qb(`zd6!|GL=ltSeeKVKR7Kh9>UDxL3Xfk7)ld6aD>!aMCRCvjX z-$i%Chj!`r6+r(I#!d6nmiozL*JS~Qei#oEv}J9zBk50dKd`#K<48QY;kibRzV3Od zYvjm0xfRE8IG6&{HO4Us?E05Aa`Xk#Q(Ys+ohLUu*T~WTZS1M8k)!nF)*DAx`sZzR z{pf~lpw5K*m5=5clm619gUMpw9SfpFbYgcfW~l=mzQx p-N1c@{>M7g{Rg#a?H$;If~u)Y+i10iqN%F9-ow7F&eyJ1(O=&0vo-(# literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr1ms/16.txt b/paper_results/schbench/skyloft_rr1ms/16.txt new file mode 100644 index 0000000000000000000000000000000000000000..ab39c61f07258468a670dbfd5968ec27bb5cccfb GIT binary patch literal 6146 zcmds5!H%0S5bbsOiaC;1wZec2MA|>-q1qnyh%DkZD1cepG~0fCO~9_*O|#85l}zUX z#>4R5%)?`NBzrpa@PSC$RW%jRQ(npsjw@?&QjM#*LFpm>AN^w_=&#`^n=d^WqkKRK|p6}5* zc%V-M74kBU@w&*)aE#M9nj!v%V~iu?fwL&LC|ERcn(HtL$_vX0##~H1EvJ_}Mfo zUm8>c=Sp?Jd$EDzmB$N&iK0oFSyG_nOx3QfOqAwphnXpvC0{o?e<4l7QppLOVR@N0 zQB>g9#hE3@*1^9MrzVL`F+3&D%qqZJ<%z8reDJJ^h*w@a{gfy9N;&yL&FUEkEfqptDdmn+vK4@W1LJ$je_qG}rdmCbHS5%L z6tqP0`neSKcM9ud?C0;sMXX^JX-d(PgL0cp9BNtZO3B8dBw{a0Az4GadH#5)w+%HX zW%aEZ_Y=kr)8Y;=otSySKFgXviSqbZfO%}@O@N{9Bv&AHi`5g}jTJ`gwLZf15u@(* zKIbvf;O3)`c}y385ibB^wg8Ol1z^-Wa1GVRcc#$b=A(xgRoi#XyK!)%cFeB%h;adZ zMtwoL=3^cg&}Y<++%+HbxPU%`7SLzZ2cK&`)_MVb#{5*&+4L}v@dElxynsHV{%X5+ j&03HD>)9YTAa@j6*42`6x`m3hE$Ozf9oqgl0= zgprLyWXhs7Ig3CjRFQca|=6$Gm(MqtgOMv z=H!sK$qfKQpO8J=17-rj*mjsPnlDgdLQ#^Z9z{^bafG@9A;wrK&qJ(~Qxbt-0EuBk z#z_d3GP=MtRr`QHLL*-uIKn)6RL{LK_N{_BdQC84ZvE1nV9sJGj-@}AVgCt55NBD$ za~6#AjQCioixq)p!GvP!L%9=LUb(S?-`yZYahJGtL>L#GUnoN^$D^JKKNGU~E}In211g24l+#xAz9Q z0gQvNZn|%_($A*2Yzr{W!+HqNSFP2Kq`ydhVRduI5qok|zebK^103lFIQmxuPj!vC zW}e*CuaV>QfDcU9$T17-#=#mn`d0`~b&VW_CpYygRF+(4bd8>lmQ19b*( zpw8e8)EWO9!C&k3y-y}Lcms6?Z=lZL4b&O`hsK)yNH$Ps$Oh^R`HyvG_y@J??LF9o Zf~IRL+iA6jvTGXI?qT0Ir^{5U}T3Op0z}mE5UmP0(RT3$UDzRK_ zmRbDXo5x^t*WSzd2wz(x$F7$ej#71UH2uNe=d+gGC8*i<2MG*veF-_k5-FI;vFBv64&@*An7dP>2P#MK6gp0D+I8z}=mEC^(9_Ll9D%foPm0SJ-&0@=# z5ZvcAeCCzUd>9eZv&fw{5c!tSJeFy}ZU4{KToJ^o%(_`A>qSsUeQmSNS$6jhyAoXe zO0YosX8vmGO-8Y1G!oQ7r#x^OauQ464U5hSkPA}scq(UDth`}s5wDLA88)QMU&^Il zqk&~9GP|C`mFy^!in)~MCDv6PLD7a7o*oOZ_5HY?VD3kwEJ#<;-9U1Ei;*~R+ciaut~xlV zDPp7!T*5lKvhJX!s3UXW64r4m?;dH2I#wRtdXpkX_x|Ldrif8EaN9LOjCSXbTXOyE zf}FuV<6mfU!L vXRy!M6S26ZsN;X!XO`2`X*Snj_7a9^?9HU)4!UU=+HnWFc{o0GmCXJCabmKj literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr1ms/192.txt b/paper_results/schbench/skyloft_rr1ms/192.txt new file mode 100644 index 0000000000000000000000000000000000000000..e22bd6dede89933b16e6f838101e32989a4963b3 GIT binary patch literal 5521 zcmdT|O>dkq5bZVmiaC<1syzMx94Y^xhblefxI(PkMOk2jjW+GCuUTxst14|;RWiPS z@yNV4@7aTSET7bHf={JUZB;7`C)HP~HTBWnr-N42E%bx!Hwpx#`W8}wm=iFaDUYDb z=42yplN$g*J`!`d14@v?)KU~ovLH&jC?;j*qbT7lUq!tKDW|za#)*&O8NpQ$j6rgI zVJ7GjBzO?h)a^ZffTn!8&k1J9gL)p6Hg6w0#6tVA*o9XB{?fr zI2XdS$cdkeQy!A>gc;_!OJ2&wI&%OcValoF%n9Q>dHDP#)U}_oVAZ_RHv0ul>J+nr z6Fm~9n!0Q4 zI7yn)^!3Y5*FO~GY}{YpjTt_6Re9X$Q+c*wQ`6_#REM4E#;z&#liq359aZbbFPD0M zRP)WQ`d*Fu``pgcssfm9=5gJ9HFf{2cbC@!xc)poPcYQ2=`BdV5Bq`W`W7R4aPAr- zMz>OCq%mS-GrH*}MvVSL#6yh{qwwI|HAam7Q_Vw-5o7MbMVv?fnc<YAo(_pl$D)1}+1vYhvz6CYdc`JcKs&66XC^So7@0!@{yRs9Z;I(FpU&>&M=4)a};BgxhP5sk*}iOgAmhRIVHqJu{SZ`k|z)Hd63$?k#H%*D}<2?P8Y&O86a?kDPgRfqb%WU zotA|#ovT`lXqF_$SIZKcVt5m?!TNcIa7#oCPuKvoh0cG|I 
zWy}$sX*0xt%%T@1yqFJj%CHE|@YLn|gOb%=119HQJxS$nP*Ntnt-h6M`p&MCq^*om=yPN0!_M^M)KvOO@3iTUvYXd0AI<(K{l%{SUXLZ- zkMne{0HzDSuKTa1QO|mJc`U#@_v?Oxq3MjWAYHw73(?IjMs(oj&loYf`rx3(h%s~E zBF>{L*$!%qdE^dU#Cde@bPj5ac`O{bi1X-P4jj}N^C%p+c=Kp`e>kF>CpY$WBOEbe z+(4hf4fGlN5+9BjwcbFV!432o+(4hf4fGk@K%a5HS^eRT@Lbak^clK=K12UwpBeu_ f?FVxQ<{+W%yTxO z9_;sKo;|>8d64}HzLrL|RV_7~q^e|V>Z5I^y_VG@sNVJ)2^>=W2r0*ya4?-IkD$xu zWIZ2~2LO(~AhX{Bp*exkQp_1=ftQ%PShhjViy)ezh1U}ZG0v6o4Rc|%`PNHq?j(%3_<%6ZIhs&f!kp6(r3(vTbb^@@gvpZ2 zOqddELvgW`Wr&1RO@t{&Xycr&mKz8YMkda5LvxzEeES~eRgF=w*}Teac7rCF2w;RV zM{|km$!8v0S`OEeo=ugXW2P5|>5?dfayll#1W@`rNzuqD!&GDWkOph$#MInlC=IQa-1hUFhc>&fC9xt(P@ zi*hNsK(>C*Ej^a9rNr;bfZboRrBqzY)+#D#q}Jt6S(v?&9rtPeUe-T~!_neJan^Z8UwZO|>sfH*`&@54zB%JIZz% zzg+6wQTj$v{i=o&-rITHRsdtekL&J-snwe8Y4#Gz)jZ( zG1}4}T6F#7!M@~#79++L)ET^jI)hhGXYdN@3|>K1S9-`;D2!{Hb2W4!#t-RM<>>I zYLRQ#`kR0ei)dl(j5D;tOk!_$s*wrdhDPgEOq~L6(QU83p}yhwwVSVA#YOh6SM^xb zaw*wQUaFJe>?i?@F1SM$0OIiJ@rmftz%luOUV1BXVvj3rK@gmFX zvH&c7J8mZ!>rM$x((HO>Ah^Ef2o1UA86rpHS3@;Kj&2j2S%%2b)Yy>Y=#Det#?h6s za5=gYU$`7i`3u>P?nDx79Cg`Wx8VBGoj#JRTZkNYP-ox{>I~dLo$>xQ&%O-Nk2|O{ z-pxX|aojIFZ7 Rs&8A?9l>#Ihig~x=nqg3tj_=d literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr1ms/240.txt b/paper_results/schbench/skyloft_rr1ms/240.txt new file mode 100644 index 0000000..e69de29 diff --git a/paper_results/schbench/skyloft_rr1ms/32.txt b/paper_results/schbench/skyloft_rr1ms/32.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3887f253e361b578bdf9f98583bbdf1f11e9cb7 GIT binary patch literal 5512 zcmdT|U2mH(6y0n2759-=(}X_&yHfwb9;WW0j}ao?f)XILjneJc?-DSv+iX%RO)~c- zI6kq@x%Ze0d8+nmIKtP;s;+L7fuqv3>TGks>vS-xz6CwtxK)rtYHlIrrKB8eZ>s~C zsy!m|Hn{=FvoD$5-9rgWq=c0$3sBObD08rs2u6j6x&tYvwPYa|6la+af)OOg4M~=f z8I^L*y@deUrh6ctp{-sXNJeP#sGbL9?E3@@O7edQro719{-qEsAmH@!EaMJDgd~NB zy6EM+qz=Uyd6l|ZFE8-dQ>2`PIT`g*PP0ie0$K9-?R!{NJ>>vv`Kq$T6_IqVmynp* zc~cs2cXgKn>oX&+=RgX+B#L-zOl1<1(TTPcv`p$jYcwM0qqztCh9pn#@G7S6Y*sW7 zrB`G*(vC6mP8rE%9?){dD?PI^E^^;vm*V+m6@@IrrLLG&ly8goVY4oZONPt-{dl4~ zoamHr^lM0SE;o}n;fg6(;fuK__U$SJ@j;j4$qTl4T2L8m_c6OVsf1k7=?v@J(bssl z+^BrStCKco!8dGZ>5;f^SVrC$mM5(;ruwNicBfUfH9(S6ulGtXUoe{Vc*Cu1(|0(X zC2eJO^Rh9`Zv`bA@2a21A{_dhZ3=#A!ND0Mo@XZ~7m$(a&ac-WFgUFZ0U+L(^G}B;BEQ6VZ*4qw5ecb9Bcf zW{ykOo=L`7YuBh^&SUP#MVv?Xl;NnxSZnWT^Gq{Fj;@o&oX6!kJ6=Cx+YckUadLx? 
zBw@tJaRq&btf0@374#W=aSbCzKdzw9;JZy2G3IdveTJ-{&(P><8(I&t~ rVMLc#%;W#;GvmLe{b29G?i93r*VtaGE!2J6R^1l1Lwh_AwMzZ~2b-_r literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr1ms/4.txt b/paper_results/schbench/skyloft_rr1ms/4.txt new file mode 100644 index 0000000000000000000000000000000000000000..bce5f838ecd5cd101582528abd0722894e6d38fd GIT binary patch literal 6127 zcmeHLU2oeU6y0n6EAFFHO_jkPi7WLV>|yF2`e-4CEhx5e0WaNt{UA2fuF{W8?F7#|CRibv+fWWmlSwxhtL|j5yzB)&FRhm_>TLj`fTP)ME?go8^mOR{~(Jj3n zs8nYqxIs91Awit!;julFAkMRmjY!jMUWLaT=RxxbFjW|HoK3TgFew@sW0pA2adi9j z+mbsmX091~!rv+Ku%)KJwz6?LtLo(=y|aiVC`Yf+$5fyB8~{gV^zTR5NXJF>AcU41 zh+b;xm?w8zEEll#0 z3i5-x9W#qsDy4o>shyx?D*;6ZF7{No2U*0mtrNv~r$>{hB}%Y|O0wS+)~PY?PiDVv zcuks0_T-?$#wHI;)w@b@(^W+7WhE7FXg7@?FKpXTcT?5BYV+;j+F{(!0md6QF8O!G z#J#MJ%K}V&H|{1FSf_*rX-(G-c-Pk$XGU{`=_5vK@IKcuwcw_!k9Eufz?cVs(Ym&U z>Lb@{3vRmlh|&J#uuy%(SXyxY)=_`fESq=z=mG`cGbjL`K>_#-3czPj06v2P@EH_< z&!7N&1_j_V?hw(sjfXyi1MnGlVCi!mKh$T;;XEJIHK-khmUm2XLAOxzwk6#bwnN(= IyMji40OrF`>i_@% literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr1ms/40.txt b/paper_results/schbench/skyloft_rr1ms/40.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b8cdfe474afe12d9627b9f3d782041d6b435373 GIT binary patch literal 5515 zcmdT|O^=%}5bZVmiaD~Ys`3|LN7{eTL$y8Z5h3C>Xai(zqip-@YXUZ=iXyG5N@gy> z@sm9>@7a&!rFxaa8NOCVc6B2)oTaK|XPT3JP6sXP2T+3@w-Pc)^#i0#@H_|8o9YC* zYR?w=kn8|5^aYvY9!SApjubOe;1x>DD4u5?MUagE^$0>tYY9O;isg9-f)OOf4KYI4 zZAd5!76NFS?g@W}wt9cEFp)gVw?S(2!GeV-?i5T}k-Pmd8w(5D;tFO2dlW&0iU73~ z%<=`r2qyt(DY&5AwK!!KA8t1mOc+5^%>}14dH(i&O{f zP_BeiUaZ>bG7sfSMaFKk7s^#o5`q@?Lb)^KUEWcebBaSLa zso)|!dy8Ikj<{0=&x#PVR9s|YW*yS?Lel~LxL#eU_*R{_6dT?Oi(&;Yd}3=N9PgoG z!!@SF1tYxE9mtMU}<-cGk>Fu3cnWpdTG)mgasOG)Y&2I?> z9WT~TV}nn9U7bpOt}Yffg}yYVK9;5*ho;i6y40pW$!?y%el&-Z^bckIs~#_QAD8Js z0Zb2mUiUvtquz9R-4&~!%GB;D2ECZZeL9GywToTHOt%sF~Xo|BA`Yv+$K z_tC!qMchaC$`W%Q3zuBPeRL-2QjPJ<2$x*EeYC#6j_Ah8&c2hZBgPyz;4^pwK7%*l zGk60&dkq5bZVm3Li;SRUUr;j+B4ULzNzKM2K~|C<|}kP$c(XzJ_&GQxQV&X5F(P%;w#?_B9^N5Xz>{Kay7Gcv zpyq(DB};L^UXv{K6=VQbnSYY3AR=VBl5F85BOt7DC|UkK$u5*B5&2t`SE8kWt?Cvs z_GryDUyGIj&DUO+MJuR)Fq9K5#v+^LlV!hC{s+-|vT%XuBHKlju!eTyZ>-l<>GJTp z$6x7|u%wk*SKnl5_DWVejUYMo>Pag9L&lQcUcQxS`pz!nq^*ocB;uNB3snpvIU- z|GpG)9_Opur7^y9ZqIe@(M^*WYwg;;gBl}7;lRz$2r=5$ACBnkb`BOZijY5dpw&;CO-j=^|Dm} literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr1ms/72.txt b/paper_results/schbench/skyloft_rr1ms/72.txt new file mode 100644 index 0000000000000000000000000000000000000000..e99e42f5867e5818c6b47def9fedd03468e33ada GIT binary patch literal 5522 zcmdT|U2mH(6y0n2759-;(_|a-;Y$4ndziY1J|c^F3rc|0HcGc&KNGOAE7UHNCV>?R z8=t^E_Z<6LJoBUI&+wIN(N?uka2B!>t*%d|ANNXBJCMB@Hv&?K)ehnmrx=6oG(Umj z&DkJ#(H%gFz94;gfDGp_wHT+AW&w&#C`>bAQ3PcP@=*7{*!V2WlER{BS}+d;1BeYT z#Ei|LEDtbE-9F;a(D2tsf-s7n#7i%fzL9W_igyU7S?;J9!UYW?u!OUmIg}x6o;W9O zCY%owV~c5K$j_jeaFJ%nfr>Q141@_KXynY2B#)lHeGg@oV-k#*Z=}r@SQ0B3BE=M1 zOuSd#JK-@($P`z+oC6=>3l4DA2_Qke1l&x8VzgyX?_dT zmK>X4OhXG<0e#0(#F&qo1;mUxnHfn#ah4p*-Yk&7vCODYT{?Lb(%r~O)S0c9>$+>rG>RIoW&K*J`nP~Q8TQvt!;(LB6+e~g z%r6EuHg&0WbtrW=3^i9rRVv+`L_5u2FZKQ;T&JvlRm1-7+GSi-0At6^tL}%c<%=q> zuLYRKZr(khuUjomkak}pc-KXIc7Y;$ziyTPdX8uZ}it%4kO<{EZPQZB&uwmO2T z+LJ?GXIB6@`i$)12C$&u#||@2WrUI*#Ux%(k|VxCeFQ0|y*SNh6k~*}Krn*jcte4f ztr)`*rfs@g{0Z9X@fKr_vv2BQP{zJfut4-J!I<)5qB#l{YblOpumuYtJc=MG)>;<9 zm>_{XigGFB%O($V$AU)D*Af=s-n6`<*iet?FQeaWU zD?GGv71+3(BlEWdo3pG{##BF5X%AXedjmK-_xfJxZTilwqpYp0ZXQe1 z{8qr}c&>gJ<8til>R6gnb#}06m~&(6LuvbQYAbVZN@M$@>iqiCPqRO&`J$|U)#JH7 zkMs0E0ZbS3y6Jz|Mn9PH>9zoVKCjOk3{7XXBRM}qekHlN<2W~oggJ&2HW-|ukMkLq 
zu#f(@2{xvePnXBJqa^HO=;y)66!SQ@(}aBtZ#^mdC zpwHk9^clQ?K7%*VXYdC44BkMW!5io^cmsWgBzI=VEd@|^MK;i9+_!~zBFsCw}<`Eo}Q*!Wq$xfTCpSm literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr200us/128.txt b/paper_results/schbench/skyloft_rr200us/128.txt new file mode 100644 index 0000000..1224a37 --- /dev/null +++ b/paper_results/schbench/skyloft_rr200us/128.txt @@ -0,0 +1,37 @@ +Wakeup Latencies percentiles (usec) runtime 5 (s) (48000 total samples) + 50.0th: 205 (14322 samples) + 90.0th: 569 (19220 samples) + * 99.0th: 885 (4291 samples) + 99.9th: 1046 (433 samples) + min=1, max=1409 +Request Latencies percentiles (usec) runtime 5 (s) (48316 total samples) + 50.0th: 11824 (14556 samples) + 90.0th: 14224 (18613 samples) + * 99.0th: 23904 (4337 samples) + 99.9th: 24416 (407 samples) + min=2607, max=28037 +RPS percentiles (requests) runtime 5 (s) (6 total samples) + 20.0th: 9584 (4 samples) + * 50.0th: 9584 (0 samples) + 90.0th: 9712 (2 samples) + min=9585, max=9710 +current rps: 9587 +Wakeup Latencies percentiles (usec) runtime 10 (s) (96826 total samples) + 50.0th: 230 (29050 samples) + 90.0th: 563 (38730 samples) + * 99.0th: 859 (8666 samples) + 99.9th: 997 (869 samples) + min=1, max=1409 +Request Latencies percentiles (usec) runtime 10 (s) (97505 total samples) + 50.0th: 11792 (29167 samples) + 90.0th: 14192 (39422 samples) + * 99.0th: 23712 (8350 samples) + 99.9th: 24352 (825 samples) + min=2607, max=28047 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9584 (4 samples) + * 50.0th: 9616 (3 samples) + 90.0th: 9936 (4 samples) + min=9585, max=9942 +current rps: 9935 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_rr200us/144.txt b/paper_results/schbench/skyloft_rr200us/144.txt new file mode 100644 index 0000000000000000000000000000000000000000..9705ee05412810f201d69ff2a3209d2627500253 GIT binary patch literal 5515 zcmdT|O>dkq5bZVmiaBngs`3{f6Dj|ohblefh!E>^Q5M+PMw|B6*92@#x{8#lm5eWo z@d&>+^Tvk4?BqZ$OfN_@j2I01&|AAf|S4uAsy`^e!>%Bbe0)Xgsm zIUi5fPorFpeO(<(eX7njY#RF9nEFtfew><0Kj~7N{wTY7{q?8WAEm!2>!0;_viotK zItpOA@awvNG>v-J>tDRh_Tii@EN)RpP?J@8M*D=cZ7Y-HsCYR literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr200us/16.txt b/paper_results/schbench/skyloft_rr200us/16.txt new file mode 100644 index 0000000000000000000000000000000000000000..5f5ebbc1cf942d2a6a95bbc88808b564b8a2fd58 GIT binary patch literal 6141 zcmdT|O^=%}5bZVo6?05h)yg&>36b_6^iXXNdqfuTHYk8u+cew$`kH{f5Rt92Qo6<$ z{4tpKJkK*Dp4fpmHGE^5S6R*#)Ldq~(s^O}%SQ2R1+p>Yl0yVbwSr}UX|w??G%G-{ zvNo10v;c_kJ=Xgz5O8gYM;6IHEbbslx;?1d0+wV$*5hrG`=tHQ&}jUjGOr%US18%* z1BuWhe`&bV*O4SN4BjD0BNF!*FOZ~h7+MQL;t_`d(Sg;NgE@+hy^-aj!7Hx_z4IQl#1ob*U z84@xU)48b3GDaoSGJj1~{)fXlXjA!@ zHu@JLV@0ZJb~I*PQ^#Cq`&5f|sF^ybRB2K0s$V}n@?F7QC(V9mZ7Jc}u?y?~UB|5} z@vL+CqSDi60s669cN;W$rKJIh-AvcXZIo@Fz9!g?X`^;mRVDBx z7>~?1^9`OPkMc>4NBAVI>YG*>I4a$!-nIvKosUK}x1dKi?-W#!n_I|B!BGizu<`&# zc1MT2&29iH^by(p9bi#_A2}{L#|xD3sOf%034w|P^&W)qdliTVR9Uhr0l@?k=Lc4z zjG$tP>DvAte}GQD+>@%J*@Jo>m9cLmEC~M-VOo{GfE-hd$g;{H3uio{I4fy_N(c)g zJmb9NlqaBsu;7f$jPvSuzJwv_DVKzz?BVm5Wn1+uf@|h4t)*935?}x2nUJRp-^a$g zF2+7wZ@LhUkuN-LIKb2=!7Db`?Qkq>My;I_K_AVX>vh4h$2XW2quH%)tAbpi72Ha2 zsL0V0oQkF5!L79NbwgeVM#==0G|wp?=<$ha`g&a4N`3Dj=bSAt|K!%d7$Y}c>(_$d z0xfC?FDOp@T5#+PcVUxI7R1SW9yu>~3QGM-xfdqqVkyf+IW=ra@oNkl$>J-$jh0Oe znc6M2Y!T0wWh;UV&Ug{A6zcDmB_!*VG4h+L?OrRnGk~+xpr4e!++Z{t+@ULNJM?ZH zWu3ITeW^|RLjfzMll9%C?_p@?hYzm z)Xn#1I@#km&94-|d=b~p@YS~Z+0^I90{D4cj~k3_Z?!8j7(XnmZtiM~iQ|i0V~yc# zShJ2X-&d?-NTN0C7&B$XI)=k<%{sDB+=_LK-!fKQqgcO=F87yXb@S{7Zy;yz266^( 
zAZPFfat3c8XYdAc25%r|F282in=^O=IYU;LE6dmel--aGdhp7~VDfiuXvWY4QgjcBTG<9j5NkBSOSmPy?j4QMnSza7MV&J3V0;OV zU%~Tbzlf*$S&V1+T5Hiatx#|lvJt&*Pv$xum1y=LM>FpPY}sc67$!p&Ju7OzYoiliB(G+6|BR`uR9ocI{*UYYo z^nLX*Yhq;&kLxl_!Lt><>ofAdWHu|3PAFCX6qPf zvyPIk*0O!ARQp>%NvFg0(}dtOH1(-c=lWvG=AkaFZjO~6rm3#gv#OLHPNHAd-+tP| zNw|xu`PEE^yBn8zPXWvqZe0yObSqy}_4ZhRW$xD94aT~0sFg9ke40GESEYz`w5OjHGR8cX7F@(S+Lu%dHAWq|1sAW5rtya( zx_Pq08|X9URV^Gb>bQYEgE!D;@CNz}-awzh8|X851AWGP*a%02XAg-EQz#jrWVe2l xSk|J$Y%p%1&#WIK|G$pYx6@(N_n?mgx}k6NAjJWiq3i1Y0Ee+VzfGk`{s2>QvHbu5 literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr200us/208.txt b/paper_results/schbench/skyloft_rr200us/208.txt new file mode 100644 index 0000000000000000000000000000000000000000..61a243b802a432fe068d56f6fe5e292fb2ef4ccc GIT binary patch literal 5516 zcmdT|!ET%|5bZU5#T-dgMIM6zN6H`cP^E_)SBQ1HC<|<`(Wd?S4huFWjiTgG$@sE( zJv)AH-owN4SU#!Y1m8-d+NxF>PO7g|Yw9Ci$AebYHS`0`8wCPleGRc-3?-P(lt<8I zbE3%W=n6ovTV@V-z&XM^ViYXNR#9?}l3Bh)Ny&s4bpujPxiC$+&6NuFDwVvI-1K}A}M zw-Dx-a~qX1^s546RW5`%Ph~cz!X)()O@w(a#Kf5bF^wL+|5)qVPgNi_UupXmO>Pw{ zQfAA{y*+Q7^0LUCT0T8OKIcG2V@95MIVRk)NN262lo(Y*-;))gKj}N-UL?`uC)|o` zvMc>iyhY2|HM5u@P8cIwF1jqhrATJ(6h%;cVHWj}mTZX%Dg2;CJru<(9>f&1+Rn2p z7wnUEwT!9ANd9=|4rQ5@GUhT%gd!hP2IJ$&Z4F?ef_f2^&bU!G9Im8Muf+%DvvvTD$f))4SlXnb=aA1 zoSITU>76#+QML2><)hvomAlwgzpL?RcjG+WQUKG1Ti4xBQ}@q$ci9$Tp1XCIV5nQu z6G(eD*oENcgwZvo8DxkU-DwmsMt2qljL|+4gzQIQ!3A7LdpugGA>K3YQ8M5<+9ncm z9i;^qZXK!buSan6=n6N`XK(|31~<@Wa07jY{zk4xi1*9}`V4NM&)^37481;Dj{yD1 zgTu5g8L;STdC@kn1&5h1ZlKTb|JY~7H>cfT?!X)rG+kSpu2*}gx~3`HJ?w|(beZ-l F`U@CgvC;ql literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr200us/224.txt b/paper_results/schbench/skyloft_rr200us/224.txt new file mode 100644 index 0000000000000000000000000000000000000000..731ce906a182564e72d2867e2f699f4c3539852f GIT binary patch literal 5515 zcmdT|O>dkq5bZVmiaC;~s_++PBIO_SP^E_)5n`P#$^si~v}u2R&4LYVKBQEtWPHKK zPvbY9=Na%r@ud1Ad@qb@%1UWCs;*RxsSdUu_gac)6p3ljvSO_ez`3I*jx~hZ*N1ftUy>FIc*5VJXiXDoZoI@MU(G z2|*`kVop-B0L>1wRQLrsT`xB{Ol6vkKJ$zw7~TK)xsGc$?t+cxMQqbI+@zBOD2a~+ zUGTc`kw@uhLA*p*=FUdOeW=!mfwx@&N8Xrm7L|S1St&wy)VF|A$mrn{Y9+}iS45Iz zZ@A^slEyTBPp;IdI%X)cWVHcau3}bVhE}?>rswDpnWavynZ++L3%N3qCZCh* z>@lO_BaaES{Ld|B2@`7pxlmCitVSmk@^tCULa&_TjU}^dUd|q zT=O~;&5E7%xIS(!_*Q4{=+SKhBJ0Xv^Y|>tk`Uu(9b=Wy*bS+F@u4 z{iJhk+JkDQ`SYXN9hC3nk(-_&#@Z+KfH}GkAh(?%?xRb7A#;?DT)?rOKK(q?5Mzy= z-Pj3`qpkbv7Th?xn*V<$sv&aRK%F5Qs54{(b;kc_yOR^59yd^D$Oh^R*+88k8>lnx z62={w@cwV2D}N*5NCtSW{qdkq5bZVmiaC;~s{9E!QvN{?ReQ)0A=d4pEU>YSHtnyk3AWd4(C<-v6X%3*9 z{%9kwvnv3OJ|VNe1&%4WkzgpX zFMKM-`6{OGhCBQb`sV45V!^Vn@^O;dyq7TLuMj2_m1F^sV+-;Er6WwJK#ogJ5TZ1~ zc_vI4sVbL?A$F0<6D}xqjB`q32fBCkqt6EgB)x4B(Hq?ENHsH48M#im7e{|>iewAdBf z9R+@+e4JrN%c4f;!axBzwh0|_h7c|y^yl(nE<#i#*xHL{zp`_Xt#*8f{>iTqWkTNS zSRx7$lQ|bFb}X~!I7WJ=Y#mFyuS+ieLb=4j{G}-WpiDR+OXaP$RJ_Hpf$U4Q4W12W z&fZ(mt~cSfiamRQ-TQvNoLuE`l;bU)ML6rF*3A!Do4u0FP6N(Pqk51k{DUx*jrQ7Y zOg9d89c6uERQFWt?w5p;&UfpFdH=)MHiudto0AP&hCX$s-PdNEr>4;ly4Geq$ib~Y z|LJxI86N8PXFK2RVVssT1+YAXbv=GJoqE*u^R@tP9@ayGsT+*4AibykLUMD9F`RS< zGDVE8m3SG`)79!2I())9`WFW;WQw)s9$dmY`gbc2HANju4=!OHeG~CeQ`Av-aOvt8 zUc)@p1a-t4=rec&edhcnnXn%>&}Z-l`i%XeT8|XZ^#=M3-awzh8|X76IsdJX2cYbV tY@pAO4fGlEAHQejpH9cg+=AIl=*OWmqmnyl$G&fd9qgw5_&ilI`wgE@vJ?OS literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr200us/256.txt b/paper_results/schbench/skyloft_rr200us/256.txt new file mode 100644 index 0000000000000000000000000000000000000000..2a4e7d78da37c360e749c6aa815cb22b30df780d GIT binary patch literal 5516 zcmdT|%Wm5+5bQPj3Lcf92>8&08z6tsLy;bG6a<;A1zVCsQgGU@FJ)<2(RB*=P=MZx 
zL~}vT%nr4(UhAD259CX2Ro}GAkb}~V>TSDs_vvU#+@QLNzFY;Gk_e39c;ZP zrtS_d^FFyFB&UzmZl4IxI9VcP8I(nk1xpsAAbl2qTxUHHfO#z@na|3y0>Y)>5vC0p zNZc{agbZ@Jwtr@yNmsu=bI7vfMZJy6*lP*1>|ca);Ygms5iU6o5FlY*aJq0wIn4xH zW&I({iyRg*sgOdflE|dWi7=Op<5EoiO_)ns%$##xFqyo3{T9|$&nmcTp4GOvK$9;O zgA!SibWQQWR~|qtIyjev$Z?HT1`RT%LO}^jtpGBaG^5teibNmGljH8H_j-j}8J+D4 z0PGSiZ&%0`A39p#NCvR5dW?Bjrvha5fnCdzqN#zhv!yQBRi16Pva6ufRqh(QB4b7= z<>iG+ddj(A-eio^wH2Mq@EjK`TcmN6#Rrtvh6Q%M6dJaaC2q)BXzGbF&!xQ5u%0Zt zK-60nC>A$e?|Rv?;Xy(f)~3GJvZ6>jWla4;Rd%aYy)lF($3gFu_CN5vNCx-Rt!+E> zZk{GxZFT!znf8|=rI;SApC(i7ho;_F=1?D9+8pNC+GbnXVH(=n>`Z0suvh&uf4a1r zz4A9z^Rt;A?LLn4ks@Ti@$+W*Zd?6ks?)ZFEMq_K7Z}^#Y6lXHj}y_29Y%)WmTQa{ zv6Ugz7%>V27qO0b@DXZ^I_3y2VjZ!gBh(mmED>D9I^ugQLXA;JKydNu==y#b(T$Tk zb_0C|UjV(BG3s~&ea8JW3?oJzZ=lb(|7c;vsN)Uv8UMWm)r?Wc8|X7UI^D3K%atZ~ x{091r`x+KTbhdkq5bZVmiaC<1s{F-=Bjq3TP^E_)5n|mg$^skPXw&}sngts>rB$P2zo7@0o=p)j{JIKLBic!IEfZ_`ZGlxrYQD)3Ty$8W8gLoc=(r?sTVEPBWB&SwNTJVyG92dDPFVRBM$R#}YP^(;4 zaAHvmW8{HWxvV72f`~cQufZgjWEjngGn$j+;q#ZUsB+GMDdnre<{Ki(!kh3KS#genwp%OZ zyOpvW`#rS^_T5_kt~q_TX6#qNZ+EMeit$EaxBTk6aoex2YUTX8IyyAQ>{mqRHrw(m z!$~KUs=tX!AEl@d3UG26+QUh>i>mqFOwV)|m${t)<_ov4hOfGn z&#JoK7GPPseRsgv_F5W}wx{hzbZf)W4Tyzgj2!KgKju7AOD^I(+Tm!a##rmjl8ZQx zcHUd6G3L?D<%siW--=?+Be&$@&7&FnVMMo1Zp1}M29JD4+M2Xlt}&p9*wXF81f4)jq#H}tI@ Uq&Pq`bY0ya;4pUQ>r#s3C#@i_1ONa4 literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr200us/4.txt b/paper_results/schbench/skyloft_rr200us/4.txt new file mode 100644 index 0000000..45c1c0d --- /dev/null +++ b/paper_results/schbench/skyloft_rr200us/4.txt @@ -0,0 +1,55 @@ +Wakeup Latencies percentiles (usec) runtime 5 (s) (7328 total samples) + 50.0th: 1 (0 samples) + 90.0th: 1 (0 samples) + * 99.0th: 2 (451 samples) + 99.9th: 2 (0 samples) + min=1, max=4 +Request Latencies percentiles (usec) runtime 5 (s) (7592 total samples) + 50.0th: 2636 (0 samples) + 90.0th: 2644 (3142 samples) + * 99.0th: 2644 (0 samples) + 99.9th: 2644 (0 samples) + min=2511, max=2699 +RPS percentiles (requests) runtime 5 (s) (6 total samples) + 20.0th: 1518 (5 samples) + * 50.0th: 1518 (0 samples) + 90.0th: 1522 (1 samples) + min=1516, max=1521 +current rps: 1518 +Wakeup Latencies percentiles (usec) runtime 10 (s) (14511 total samples) + 50.0th: 1 (0 samples) + 90.0th: 1 (0 samples) + * 99.0th: 2 (786 samples) + 99.9th: 2 (0 samples) + min=1, max=4 +Request Latencies percentiles (usec) runtime 10 (s) (15180 total samples) + 50.0th: 2636 (0 samples) + 90.0th: 2644 (6281 samples) + * 99.0th: 2644 (0 samples) + 99.9th: 2644 (0 samples) + min=2511, max=2699 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 1518 (10 samples) + * 50.0th: 1518 (0 samples) + 90.0th: 1518 (0 samples) + min=1516, max=1521 +current rps: 1518 +Wakeup Latencies percentiles (usec) runtime 10 (s) (14511 total samples) + 50.0th: 1 (0 samples) + 90.0th: 1 (0 samples) + * 99.0th: 2 (786 samples) + 99.9th: 2 (0 samples) + min=1, max=4 +Request Latencies percentiles (usec) runtime 10 (s) (15184 total samples) + 50.0th: 2636 (0 samples) + 90.0th: 2644 (6285 samples) + * 99.0th: 2644 (0 samples) + 99.9th: 2644 (0 samples) + min=2511, max=2699 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 1518 (10 samples) + * 50.0th: 1518 (0 samples) + 90.0th: 1518 (0 samples) + min=1516, max=1521 +average rps: 1518 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_rr200us/40.txt b/paper_results/schbench/skyloft_rr200us/40.txt new file mode 100644 index 
0000000000000000000000000000000000000000..15c569da83d791eca0ef481a83fc713f0e94b541 GIT binary patch literal 5512 zcmdT|!EW3j5bZVoiaBngssao)M9LrZP^E_)E##$LYuEM$ur}@27q1PrSDUnnR0(r2 zm@&LJ^M*lwl6%!1;j6T&DJx~*sC20sTOC|~(-~FXg6`b7R={9mZec_7Tu`vBl?O1g zK03?W2WEwVIC%1&!1M9@cb4NqH)I8MhOD5@kpEd{`oE@aXYax86x3}~*;cD9lxf*F5bZVo6?1f=s^Y^K6Dj|ohpIi~Xjxv`wX(K1fVF9VeQ~^Ch<2-{QYAc> zHOmvcH}haDKQ>QtI>P71$f50|hND!i987nx&-tWfdkbo^<6Z)TT;D=oR2*|KqiGJH zoBn8#x7iH^B_(Eyv4_mKRi#bG?M{Sb69n3W1jubI`A1@a2BYi6b1to+S(fmyBB-)B}YSj;Ex`sobaN(reh3l7~% zxgepI%WkdE6~U~sdz>xJ>=_8JjDK^hlZr2$%GIx{*DP?|_0C^Zp!sGs4_TYNlFd#7&Q7CxlFI*4RM}`R+{Sd{V5d>mH%4_YweEgN;B>xKKg_8+jBRtM z^|3iw*b@5GnRZ{BaUPmRKj~VV@gRru{OeD*J4pXfw?EtYR`+pQ&J)1$;OF)D-E`_% z*RR_GoX39NFEDk3QI@2uubW72Y&rTqaweG~N7qJD&ZED*63(N49Y~lXb45;=qig4m zYKmuuJ8}uP;_v1)zoZZ-0jW|-|xPd-{H_&JB2Ko%%K%c=I=ri_kaf7yn#MLlB;cOJwVwF*+8Em8|X9SfA*RAtLZqIdoX(m{Wx@HRB{LH*!Rt_gWc30 JUx!L&zX5CVuhswn literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr200us/64.txt b/paper_results/schbench/skyloft_rr200us/64.txt new file mode 100644 index 0000000000000000000000000000000000000000..58ad9a36ccdd49776071066ba3ff31fe6342f6a4 GIT binary patch literal 5511 zcmdT|%Wj-76zw*A#a)u9s_+AhBjpdesM1B2$YPuhngJ%Z(WL$Qnt%;XMp2_?VeV!y zKC;ib_t<9qAP#ak!FOR~S2t3_Nvc|Qra9W@bkMTC1~u4rDqv*z-^`MmXA*hqSwUxGS9zI}IlucgcjOO%$tV>?S z9FbQB^ImlYhLn^nTceiRkjNP6U=b#l9!_VZP^32=KtbWCly^%uUX(C^0Hw=UY}uD zf|FKiEq=+$?3ENd4JbMH>L8VWQP8Bf`>ild-`VagX@ybEbETU<5*Qgb)i2{hKlZga zR{A8)7B+=GH>TcKrXQO|=!34b>5sCT`(J*V-BJ37s{UP%o4Swlw4MN_2j8#zN7JY$ zUA;^TFt>fbA7E%Yqby19=(#7lw&mzgh=@75P2D9p#yk>7F5*1;Z8&0%{<9`xj&9#} zC6CdM%#n*YkM6tAQH?Q=+>whnkM`UTBf55SWj{5-h>_z4&Kb0Ub7uLQJC{7hT5sT- zK^r({&<4&Kd!-OYjDFm}Ib$zy!icV&T-gh$Fk!%3<}_NG1B=iR7fa|>#;^G-qmJADhgoFhiT3`QJ5 zi|%BRx7iIqfj%R1xC2ZPEMtdr#_|Zo7Zk+-iWj5`P#=NwITq!_$I59{1R$6|eA+Na zcm)v}VY;@z$Dg1R&-Xd1vIqGzN^Rb$mzDT0dP`nbUT=YM84+-L87`gR9p*VD0czFD zNabVYuR<+)D@;qbA!9)tfJrVXF`E_J0ElM~U%$mwRdW(-Enif&yrPmU_2L}mZqt~% zt2^f_t?M-p5$F|f&T%pG<~+w)Gb&|`2k=C|(^#IjX{>uf+?^S~2#*2UpB$LD6=BSqqNdXI3wm7iDp!j!tmsR!Z_Bu7eZI zaPF&38L<)9lfDWL-O9^6Ji_bgtZ;BVP>lIUyE?75DzC4h)v&7_EimNoa`CnwT^`;> zOw=RC7B3xZJ={sH#ZOtAgOXye0cYnyJxb-jU?dytjVnw$^mZ9#oiM6>u66rc0;AKl z`e8bC$Dt9&TA#$(!semRt!WOm8K$Wb`cc=~3`f~7>z9vqf0X{AZhke>weH7xK2QMj zgJ0Lf57VkAU0=2ZSmu7+2N>JlC`;1aY;GaBx#j3j@Io?0j=tF?%#pexC(O~^U5;vs zwJsdFg!AZLGaS_v^T-^zg!Aa$=^WJ*^T-{!bo03WWJs9f2Ko%%K%c=I=rec&eFkr! z&)A>L@ksD~#2e@{cmsU~Z=lbR|xp-`e-4CEhx5e0WaNt{cvnET)D0K&=sWd z5sy#V%!7UEg&Z5H1`}6Qw-<0G0Srr-rk~*(9nn5 zEc(jsd#3d3C}t_io}q|IN+yJHp6~Gz-^3D=-EIy_Q@nSA9HLIpl7yxCWW+!dXDAt& z;yf{AzkFL}P>$?P-X3LfErWPMo+i1a=idE|&9NKt2=>Yz%ZLHC`92G$d12h=b|RQmps#aC3&+sJeFtXclOV!bf z2^*Wba9y2BEry|{>a0qoMa|l2{rbU=HM5I(!RWk(t{{EH=)9!7#h3<=>-o7iL-jG&*$i&_^bzCy_?)5o zh%ukRd3=xi05BH*V>GtEY~GEdjd=(!n~xX+@EH_<&!7N&1_j_VC;*>90r(6Gz-LeZ zK7+lh*-J}-B--ErdVsvl96stKe@JKD>aH0+d<~V zW<-$u$j_ppr}8BF3w$lLXscQ%xCmK^R@Z0q9`{OAcOZMCHv&?K)g8nMM?3-D>GBM! 
zY%a#~F4_U4=nK-vJz&aUk{Bn9bI*!TRy0Ag6;Cl+Slt8Xqpy@wXO&QrER0|<;zPic z=d)2lJ)5R(AMj^r%9n?X(dbb;_d@9p;?3!Mcyq)olo+SvCIO3=CuwG@LJ%^_T)P=B z&k|d_m@?vHv3Pkl2Q*cj{QwMbNruRX7!yo!^!V+&-&Hvl!SwQ`v)M{l$`Ha4$E6M(ft`;lCD?rx>yNtR{*yZ*!_kBs} z6~Z#?2%DfcgvC)Klq!FULLa3l4+?N}?c_;F_X9&wXExkY*IjFtQPh-L)-Q#se+$Uz zu&aI=*mCZw@?5A(c{OIEsB5jOW1+i2)ulSALh0@-+G+juQ6J92y%g22YS`7CT*vPT zV0>}ws{5g9`K*f9umF?p)}4dCZnZQ?+U?IS1gD!Eu}yAzhL|JSK#p!62V6%xsBAey z?6sQ$0oT!v^N{Q4j(!2x(H#o{&e0uk!p+f)eP4ppqn$aD_!44{8<;bA19Jv%V9wwT z%o+1{%9jwonGMVtbBE$fh;`h+oFT#0&2KB9Xh$|MXUGQTjJaa-<$v~mxHav1y$5|1 Z&~$CBJ1GuObxl*Y2RQW2O;V-p!OUeb zelqjsJ$qOl>Vp_h@VVBaZ(5=<26Cgb_;a#p5WHekWm;;Wr3#>=>sgXBi2zTnKYYu`LVED54%}CCp3Y zP$bJs53~|SD8oyQ%>z+@nFwRb%Y`#72}|z3d<}J#a}|u5&$N9>S8NGz!)%V!*Yw6I z&v{;0#4APc5WeC7Q{J30Hs{T`NxK=PG*$$8QnyCzyhtA2;a20Qi(NS@2z^66#4IaF zF~{83#gu#@pPT!ZTFyy;TGgX$H8o>AWN7VL5bl;L%TVEeva4gv3!~KQ8`Sq}j!F_5 z$TIf*nlo&+{JPm!#=c+6GJlz|@he@7ks|i{2K@`aTCNDaKy-EN>PG|g^S6hUZ`o(b zJKL{|gor;|7W1SNO4Z**rT0?QI|V2?5Aq16#d`QHA?sH%_k1t40_x=D2}AgEr7-&<6U9 z`KuX5jDFlepFtbwGv@P07%^(Sfj)zyt6L~5WXToYK%c=I=rik=$fd^rQ~T-5=`iYB b(0c*h(6@S!Vh7F8b#=dk-PoNjLn)G9UnQ|h literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr200us/all.csv b/paper_results/schbench/skyloft_rr200us/all.csv new file mode 100644 index 0000000..62c76aa --- /dev/null +++ b/paper_results/schbench/skyloft_rr200us/all.csv @@ -0,0 +1,22 @@ +cores,wake99,rps50,lat99 +4,2,1518,2644 +8,2,3076,2644 +16,2,6248,2644 +24,16,9008,2660 +32,197,9328,4792 +40,198,9616,4808 +48,200,9936,4856 +64,384,9936,7240 +72,392,9936,7288 +80,508,9936,9648 +96,583,9936,9712 +112,703,9936,12080 +128,859,9616,23712 +144,973,9904,14576 +160,1122,9904,16928 +176,1270,9936,19232 +192,1338,9936,19424 +208,1398,9936,21728 +224,1590,9840,34496 +240,1590,9936,24224 +256,1770,9936,26592 diff --git a/paper_results/schbench/skyloft_rr50us/112.txt b/paper_results/schbench/skyloft_rr50us/112.txt new file mode 100644 index 0000000..52d6112 --- /dev/null +++ b/paper_results/schbench/skyloft_rr50us/112.txt @@ -0,0 +1,17 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (99004 total samples) + 50.0th: 159 (29341 samples) + 90.0th: 194 (39731 samples) + * 99.0th: 201 (8798 samples) + 99.9th: 204 (185 samples) + min=1, max=368 +Request Latencies percentiles (usec) runtime 10 (s) (99219 total samples) + 50.0th: 11728 (32841 samples) + 90.0th: 11952 (35548 samples) + * 99.0th: 12048 (8526 samples) + 99.9th: 12240 (867 samples) + min=4648, max=15835 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9872 (3 samples) + * 50.0th: 9904 (8 samples) + 90.0th: 9904 (0 samples) + min=9836, max=9919 diff --git a/paper_results/schbench/skyloft_rr50us/128.txt b/paper_results/schbench/skyloft_rr50us/128.txt new file mode 100644 index 0000000000000000000000000000000000000000..9b3b4b6fd7a9b1975d9a044e8820ecb392305603 GIT binary patch literal 5520 zcmds5!H%0S5bbsOiaD~Ys`3~daHRc%9;)qOkH{i!gAyQXn`YauuL)pqlvdiRs!ZpC zjh}`$GtVC2SsY|{f{((;rmUoflT@W_Om(#5xYM${2i4hmEddF{`W|AQ^PGWcjW~i9 z^~pl-qdNdbJ`l6t0_GbSEv7s{=ArnAqDkUV3D4MOfqDSW2VayChvJwg3lQ`m-anAC z*%c+|Vd|=R#P?8(*T*zv(UW}Xq&9C3INSU#VB~b987B#$S1hNrq%_ZmVB%y$*plZQ zFs3Pv>5Dg6LcCz6fH7rAM^|Z{Y=Lw2A6)g9kSq2#ocM^IJI|2})UBs1Dm$gD9zmV-))t&3(es~3mJ`Zv#Ptiw z(rgi$)!A*yW=mnu6}*z{m8*XxS&r!&B+Js90i!QjzC_IdUrBbkk>@??OO|_&>&wn^ z%>NI?if>YhuX0i>vZ$6?+dGBXD=BswFgmyDAk}<>=22@;USX=XvFjwNg;CXOp{pMf zvZOy<-}U?M*p}j0=#w~G*kJlxnQ~v4wx1fI54zB%J<4WWzg()_QMy4AF|oc`r^{5y=qJ=#vB&@b literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr50us/144.txt 
b/paper_results/schbench/skyloft_rr50us/144.txt new file mode 100644 index 0000000000000000000000000000000000000000..b58799db222ea7febaf01f7451ef741952d5d412 GIT binary patch literal 5518 zcmdT|O^=%}5bbsO6>}u3s=~&A9clkT57qXtM`RJVLE&p{(`@_eYXWv~x>~hWE19_j zBs#+)*gseoT>&6_%qY~8)mN0F*$ib`Ud`kQ8*Uc>{{K zgGt>*cK})PA<@S@V2ogi7&C&2m&I%rDO^@cb2GZGbq|a!Yh~DFQChH75==?#a>Fcl zNu)@;oVM;B(0ge4%L8FW^eCQ3q4XaTri`LD2qS{>WJVBUQqbH(UoUAdnf*6jzx@kEvZq^qjCY(?ct_{Oohueg0g zeaGIo(GvC@yIyD}oF^Gw44Wg8Y}c^-n~?mgSPaW>6174pbEecsDfmGFiq5?}33)w1 zNYtBsm+QLk%)GgTYgxaPs{SFMNTKcuXt0cfuBv<9O_)_>R9T28fvais#Lmf zM7PXee(FOb+(B7=ucpo2#d&_90OkWXulg^#md~oZ+!kONyLq?3Sa(_)kjswb&IdO( z7+q&tP=<)n?y$CyA!1A|xPa?u`;dhiVjVLJF5o)4m#cs=y3QOhM!Es5S@;;u-w;27 z8%KBMCEAY=F>avGpbhjHw1GZjUhw@0(T^MGGiU>S25q3vn2#TR1n5Wmg~b645#t8> p4BkMW!T+((Oy5rXQSU(?1+;xv>t2cjRDIj>?f{3e9WFyDqMvLev9ACC literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr50us/16.txt b/paper_results/schbench/skyloft_rr50us/16.txt new file mode 100644 index 0000000000000000000000000000000000000000..1a4ee9c2258a832dd5ceda2d07931c44b6414ef7 GIT binary patch literal 5475 zcmd5=!EW3j5bd@8iaBnoDgq3~M9LrZP^E_)Ez3*0R@U|gur}@2cUUh3w@Ip1TL~^} z!YsTu^M<$1efyy21AJ=kU76lDo+a_t^$ zI~*|bs(23|$Te}h8=w%!hO8pgA0j@0Qp+AJZUGxw3Ax@zT`PZ38nixW=*L_75r+2Z zma^hbKhD~^%ShIxq;HU9f>vjYuaIOF=OKc!vaS;j3DHUf8XI1wG2oN2@JwQU=ZQ%8 zLX}A_io4HW)-^N>d#v0qb*x^X$IcZJyo%t<74T5yG-3@ZR9VzD2?Le;QU#hGR@eWGWDORwa)yeCi zPPU5h72=YJ*VO6Pia}f3e$$QH8QpFz(Be3m2W_7JP!$v2rLF6y5tm6Zw9fQTjqQJE zkfpyc-}&?RQ`hbrduWdswhVjhUAJr8^P1~ z^8hRlaotW|U2h(3b2=8_Jdf*fgSj7_L6D(?!$L-Lt|aOUF(zGof|(IUAp{W>Vhopj zhIv#~MU_O1WdL{n%qWjc$g)<5F}PBInh{2{9w7;06aid*d6YG;DMyUq{tQqv%A=|T z+6rQn0bG80#GWC{_e}7R05z*TqEq?%Q3Y`Mx_K{F;)cqI=&=_?#`>P1;_i5X6-W^->VQ~ z#y)c@E&lGox5)L#sMY}^%MQ~zWq^5P8T(AkW5zxcG5$}V@&6E=W_JT_r(u}J-c3et Sp__)G9k;NZhr?-Vbnz2^hOSru literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr50us/160.txt b/paper_results/schbench/skyloft_rr50us/160.txt new file mode 100644 index 0000000..efd1cca --- /dev/null +++ b/paper_results/schbench/skyloft_rr50us/160.txt @@ -0,0 +1,17 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (99208 total samples) + 50.0th: 253 (28836 samples) + 90.0th: 292 (40954 samples) + * 99.0th: 301 (7816 samples) + 99.9th: 304 (635 samples) + min=1, max=527 +Request Latencies percentiles (usec) runtime 10 (s) (99298 total samples) + 50.0th: 16368 (29675 samples) + 90.0th: 16736 (43915 samples) + * 99.0th: 16864 (4499 samples) + 99.9th: 17376 (250 samples) + min=6941, max=20515 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9872 (3 samples) + * 50.0th: 9904 (8 samples) + 90.0th: 9904 (0 samples) + min=9779, max=9911 diff --git a/paper_results/schbench/skyloft_rr50us/176.txt b/paper_results/schbench/skyloft_rr50us/176.txt new file mode 100644 index 0000000000000000000000000000000000000000..c6046dd342a87e615f7c3dc3ad1be8e2c8a5ae5d GIT binary patch literal 5518 zcmds5%Wj-76zw*A#a)u9iu@ScI8y$giz;1Yi7dwHpbRjvO(*Tw*9_pn(I{#wRWaL}gGt*iIGpZ8W**I;@dHySX4++IU2CEiGIol|?T zsyX=7>+A}Eph+#ekv-^_UR(FOgb`-%5XPK~34te!QH)?Fj3wGklyk}%LJOQ{ z!dPNHEnm(s<%=X3R^>z(OUmb|n1~;DBFqFOBWc29!LoNikVZg@pXCL?q|Zw zij{;dlYYHox#aY}C|LDP8}(Js1xsYsXlwnQ((Q~^TMIZlcIH`|^9{kX&Y!!=)m`hy zQPwDD>etfNKQstBoUI=Q8TMVJ_N6_jqfZ-~J=U(;m985?r|h#Wt?Tx>oyJd>db`)5 zQ&!)r;cyS_IBqC_u@lB^_vC8xV$0KG0j4;NhY9++b;g4X=lL{}TNm1+Bhi0fLVq`P8>6#+O$pQk>6fuf`ZtSFpF}xKgthG#k9sOrW+>(p4EB_K5 zw-hn1z-RCZe8zv2#w|sySKu>v1wMmU;4^pyK7&`_GbFiv93L})vMaIzpCK#o8S)o> iX83p7_3jSbPD9hRwd;)DLe({mYPYcMo5LwII{OLUb+P6E literal 0 HcmV?d00001 diff --git 
a/paper_results/schbench/skyloft_rr50us/192.txt b/paper_results/schbench/skyloft_rr50us/192.txt new file mode 100644 index 0000000..3d38c68 --- /dev/null +++ b/paper_results/schbench/skyloft_rr50us/192.txt @@ -0,0 +1,37 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (99122 total samples) + 50.0th: 318 (29241 samples) + 90.0th: 346 (40420 samples) + * 99.0th: 352 (8417 samples) + 99.9th: 523 (606 samples) + min=1, max=755 +Request Latencies percentiles (usec) runtime 10 (s) (98975 total samples) + 50.0th: 18848 (25510 samples) + 90.0th: 19104 (42453 samples) + * 99.0th: 19296 (4945 samples) + 99.9th: 20320 (630 samples) + min=17052, max=25020 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9872 (5 samples) + * 50.0th: 9904 (4 samples) + 90.0th: 9936 (2 samples) + min=9780, max=9925 +current rps: 9920 +Wakeup Latencies percentiles (usec) runtime 10 (s) (99238 total samples) + 50.0th: 318 (29281 samples) + 90.0th: 346 (40472 samples) + * 99.0th: 352 (8429 samples) + 99.9th: 523 (608 samples) + min=1, max=755 +Request Latencies percentiles (usec) runtime 10 (s) (99278 total samples) + 50.0th: 18848 (25557 samples) + 90.0th: 19104 (42513 samples) + * 99.0th: 19296 (4949 samples) + 99.9th: 20384 (638 samples) + min=9906, max=25020 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9872 (5 samples) + * 50.0th: 9904 (4 samples) + 90.0th: 9936 (2 samples) + min=9780, max=9925 +average rps: 9928 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_rr50us/208.txt b/paper_results/schbench/skyloft_rr50us/208.txt new file mode 100644 index 0000000..a718963 --- /dev/null +++ b/paper_results/schbench/skyloft_rr50us/208.txt @@ -0,0 +1,19 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (92724 total samples) + 50.0th: 343 (28079 samples) + 90.0th: 388 (36760 samples) + * 99.0th: 401 (7919 samples) + 99.9th: 473 (649 samples) + min=1, max=748 +Request Latencies percentiles (usec) runtime 10 (s) (92544 total samples) + 50.0th: 21024 (27398 samples) + 90.0th: 21472 (37769 samples) + * 99.0th: 21600 (4600 samples) + 99.9th: 22624 (623 samples) + min=18146, max=32174 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9072 (7 samples) + * 50.0th: 9072 (0 samples) + 90.0th: 9744 (3 samples) + min=9060, max=9775 +current rps: 9061 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_rr50us/224.txt b/paper_results/schbench/skyloft_rr50us/224.txt new file mode 100644 index 0000000..02b1a4a --- /dev/null +++ b/paper_results/schbench/skyloft_rr50us/224.txt @@ -0,0 +1,18 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (99499 total samples) + 50.0th: 375 (29720 samples) + 90.0th: 424 (39850 samples) + * 99.0th: 450 (9013 samples) + 99.9th: 553 (707 samples) + min=1, max=852 +Request Latencies percentiles (usec) runtime 10 (s) (99530 total samples) + 50.0th: 21408 (34538 samples) + 90.0th: 23776 (37521 samples) + * 99.0th: 23968 (6096 samples) + 99.9th: 24160 (343 samples) + min=10685, max=33698 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9872 (3 samples) + * 50.0th: 9904 (6 samples) + 90.0th: 9936 (2 samples) + min=9760, max=9938 +average rps: 9953 diff --git a/paper_results/schbench/skyloft_rr50us/24.txt b/paper_results/schbench/skyloft_rr50us/24.txt new file mode 100644 index 0000000..72e98c4 --- /dev/null +++ b/paper_results/schbench/skyloft_rr50us/24.txt @@ -0,0 +1,37 @@ +Wakeup Latencies percentiles (usec) runtime 5 (s) (44274 total 
samples) + 50.0th: 1 (0 samples) + 90.0th: 2 (15274 samples) + * 99.0th: 4 (2396 samples) + 99.9th: 5 (62 samples) + min=1, max=40 +Request Latencies percentiles (usec) runtime 5 (s) (45245 total samples) + 50.0th: 2524 (8428 samples) + 90.0th: 2652 (18997 samples) + * 99.0th: 2676 (2653 samples) + 99.9th: 2676 (0 samples) + min=2514, max=4190 +RPS percentiles (requests) runtime 5 (s) (6 total samples) + 20.0th: 8848 (3 samples) + * 50.0th: 8848 (0 samples) + 90.0th: 9232 (3 samples) + min=8839, max=9219 +current rps: 8839 +Wakeup Latencies percentiles (usec) runtime 10 (s) (83812 total samples) + 50.0th: 1 (0 samples) + 90.0th: 2 (27988 samples) + * 99.0th: 3 (2623 samples) + 99.9th: 5 (424 samples) + min=1, max=40 +Request Latencies percentiles (usec) runtime 10 (s) (85641 total samples) + 50.0th: 2524 (16726 samples) + 90.0th: 2652 (35759 samples) + * 99.0th: 2676 (4987 samples) + 99.9th: 2676 (0 samples) + min=2514, max=4190 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 8848 (8 samples) + * 50.0th: 8848 (0 samples) + 90.0th: 9200 (2 samples) + min=5043, max=9219 +current rps: 5043 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_rr50us/240.txt b/paper_results/schbench/skyloft_rr50us/240.txt new file mode 100644 index 0000000000000000000000000000000000000000..a51df3ed35e55e6c0b218d01da67f3a9b3e0e821 GIT binary patch literal 5519 zcmds5%Z}S16zz8W6?f@WRb?KIA=3Oo7u9qzOA9%)vEoMta5L@K7sm#NnY2=?R>EZy zFh}^@dk$QaM{$(H2|frTyQ-ENPEu8}Gj(J8>7Zq`0X5ihD*J6lvqk@6yjc7oN z_GF=(KehQUUQy#3GFV(gAdo3-V zMdLmMNJIWke48n!Sb(tP-5NZblLcuQ-bkG@#9=;#7|;aKb5W%Dgcg^neG zua|&B$L80z_?ct#;B}XPGe)-mg=586sl*pKJJ#xuv{GyPq%;R5#a;tS&b>NH)0)bzPF)uA-~I5a{Zb*W9? z$Znp${M7qK`c7GWtH#saxAU~40H%(g*ZpTxt0!GvUJEdf{k&gbs5_%9$@xg~&!Zb# zj>xl{Ix%u|2dJYOBS&B2XOb~;bY&*yJo=BIi1WC035=Lyu>y{41swgCbi7*Idq~)# z8z*=6D>`g3a$G^3S+0_EI%CY^3hE46L7hP>s558#`^@;~X+M}9n1h72?`qR4xreH6ThZ-dKeVUIP|4&69m=u( literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr50us/256.txt b/paper_results/schbench/skyloft_rr50us/256.txt new file mode 100644 index 0000000..e23ac25 --- /dev/null +++ b/paper_results/schbench/skyloft_rr50us/256.txt @@ -0,0 +1,37 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (99236 total samples) + 50.0th: 436 (29821 samples) + 90.0th: 485 (39882 samples) + * 99.0th: 502 (8733 samples) + 99.9th: 813 (767 samples) + min=1, max=1005 +Request Latencies percentiles (usec) runtime 10 (s) (99023 total samples) + 50.0th: 25696 (30907 samples) + 90.0th: 26208 (38807 samples) + * 99.0th: 26400 (7262 samples) + 99.9th: 27552 (439 samples) + min=22628, max=27912 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9840 (3 samples) + * 50.0th: 9872 (3 samples) + 90.0th: 9904 (4 samples) + min=9770, max=9925 +current rps: 9903 +Wakeup Latencies percentiles (usec) runtime 10 (s) (99414 total samples) + 50.0th: 436 (29875 samples) + 90.0th: 485 (39968 samples) + * 99.0th: 502 (8756 samples) + 99.9th: 807 (765 samples) + min=1, max=1005 +Request Latencies percentiles (usec) runtime 10 (s) (99449 total samples) + 50.0th: 25696 (31032 samples) + 90.0th: 26208 (38891 samples) + * 99.0th: 26400 (7282 samples) + 99.9th: 27552 (439 samples) + min=8695, max=27912 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9840 (3 samples) + * 50.0th: 9872 (3 samples) + 90.0th: 9904 (4 samples) + min=9770, max=9925 +average rps: 9945 +timeout: the monitored command dumped core diff --git a/paper_results/schbench/skyloft_rr50us/32.txt 
b/paper_results/schbench/skyloft_rr50us/32.txt new file mode 100644 index 0000000000000000000000000000000000000000..d29fe1ae11cc2d479328356007c964c1593bc921 GIT binary patch literal 5500 zcmd5=O>dkq5bZVmiaC<1s{9R(lz-4el^$|jA=d4pEU>{woA%dtykIctDpDd5qe5;)4>c&_&nV~kFYmV5@M{DX^7)Bho1|;P67IL9j$-(umI)bg* z6I$M8Hvp1+CGKzs43?Jbk`@~yJsa_2HKJ5g5m|i(t*51gUabU|EHVObr2YFOS&by$ zn6yoIPrpE0z1&NlJ(%awSoc1%1ux}WWR(!}>vPJeeg$!6D@wLnX+c$la%C%tD3`U0 zjY}f7R0X~{6L#UKYN|l;%eTCLFrBcRGNU@#+ z@QqX@Ep%kH1}~PXR5CV-f)_#ukgGCUXL7;^r5fleqKe`zy0KQJ;@eyabYrc0F~wFZ zT}epbl2iFghf8MCH917N_3p>C8mLlwg=bJ}0EUXqV=cyNy{_l$YL)*4icA@%g{E7n zlB_k>R=>>79R^eFEzs=T4^L)T9&ncRxXD(o={uape^hR0UUs(mW1!%EC;jAe?bz4V zac57}8O^3<&yA}OJJ)rLtuKP) z3Hy<2B`Nwb)YkwtrC1|I+$e7F0JUvNVJs!?Og9kfd_bBMMy*PP7}vAogfcCRQWC@% zPD4rhu~ec&jO*z-ML(7W`VkBMwk0%Hc)TH~M?8#dTT&P~Uat^i%5{eNyB%Uoxz6}l zdyFa98T2EUh%x0lCNMhe83a-G3g3$MqN>kP#&8hD|hDc2dw3Wa`T z3F+^W7!e*tIrryBmUNvFlwqA=N!J-BO7F-2S!ev0(tdPz;0^}bzH3}Rm_5{e+g9Bk M_G5dx3dkq5bZVmiaC;~iZEbUBIO_SP^E_)k;OV)w1A0iv}u2RhYvf@LaJ14WqbkS zk$G?Cji2C=oq5;7C#HE*lw3i}Wx*R=R(S7siWk=)JDk@XDER6c{4@+B&_c5c6subd zy!NgDXmA_o(;X17!w(h_*>56lAj`%*Lc0a{5lxo6WVc!Wo_WmjJ$2dKlaEldm;1@qp@e!xCnd@aWUEj*{gcOcD3S!D zBnhS%*lyR@~m?#kZnAq3yVgT>~)Q*mWhIbSa-zKD-uSn%i}|L02|fB1n@&v*6G?&e9-D z5aY~Z2AB?Eqy%+H5u-WXoy=o!i2FQ72HbRYD34i^1jzw0nwB+C9m9A?5hFF=oXlew z6U3Ofi*Y~}AjWhD7>x}Xs1E7T<}owirmKS((e;B;1H>Au#2g?=%s}mwpj6>6tL3m=?Au z*n}he`_B2`w7KU;F`nQH*P^c*q2MHBEqdLw<~bdesINedX50xNkg6+4Q9(-#deFQD z#k-Sz&LnDvStZQsPvkgoQC+Q!&d7V%8B>CMJl? zN^yycVqnVSJ2f4rYH4Y=zP)@vw*j`!#z+#)(r9d_= zsYfY|=|G;;4yJGJ-{4D>&BnxALf8da?`2FxDGss|lq_)XIy?XFOfQmc3-p4>&~z9f zO4fu+g~O_rH5AZ%lvd}_O0*_RsFNJr?tP%^WFiNAN;%;zUxLl zsA_sGz%uspeu1&+wKS0K5O5O_jSE&HbKY&jZZly<#7Hn>B{7W7t`X+3B#dU6Vay%e za*rsFj1rbJ!{~ahLyd}&B5E0lgNrebG^5s!G-gIp5^5OBm>JEMv1gBKAr3VvKNeeJ z{m2~La*vC#pg7r>uD`yb8ym*pznTd)M#hNy%$zfm$B6rkXNOmc#UARR P>v+G1{n(wRp%lq){Clcj literal 0 HcmV?d00001 diff --git a/paper_results/schbench/skyloft_rr50us/80.txt b/paper_results/schbench/skyloft_rr50us/80.txt new file mode 100644 index 0000000..74666c7 --- /dev/null +++ b/paper_results/schbench/skyloft_rr50us/80.txt @@ -0,0 +1,16 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (98362 total samples) + 50.0th: 88 (29372 samples) + 90.0th: 136 (39551 samples) + * 99.0th: 151 (8891 samples) + 99.9th: 152 (306 samples) + min=1, max=236 +Request Latencies percentiles (usec) runtime 10 (s) (99118 total samples) + 50.0th: 7176 (34149 samples) + 90.0th: 9520 (35231 samples) + * 99.0th: 9616 (6941 samples) + 99.9th: 9712 (653 samples) + min=3920, max=12196 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9904 (11 samples) + * 50.0th: 9904 (0 samples) + 90.0th: 9904 (0 samples) diff --git a/paper_results/schbench/skyloft_rr50us/88.txt b/paper_results/schbench/skyloft_rr50us/88.txt new file mode 100644 index 0000000..4187511 --- /dev/null +++ b/paper_results/schbench/skyloft_rr50us/88.txt @@ -0,0 +1,54 @@ +Wakeup Latencies percentiles (usec) runtime 5 (s) (49282 total samples) + 50.0th: 111 (14664 samples) + 90.0th: 144 (19842 samples) + * 99.0th: 150 (4177 samples) + 99.9th: 153 (448 samples) + min=1, max=246 +Request Latencies percentiles (usec) runtime 5 (s) (49751 total samples) + 50.0th: 9360 (16269 samples) + 90.0th: 9520 (19557 samples) + * 99.0th: 9584 (1480 samples) + 99.9th: 9936 (407 samples) + min=6756, max=10128 +RPS percentiles (requests) runtime 5 (s) (6 total samples) + 20.0th: 9872 (2 samples) + * 50.0th: 9968 (4 samples) + 90.0th: 9968 (0 samples) + min=9884, max=9966 +current rps: 9962 +Wakeup 
Latencies percentiles (usec) runtime 10 (s) (98516 total samples) + 50.0th: 112 (29891 samples) + 90.0th: 144 (39504 samples) + * 99.0th: 150 (8369 samples) + 99.9th: 152 (847 samples) + min=1, max=246 +Request Latencies percentiles (usec) runtime 10 (s) (99564 total samples) + 50.0th: 9360 (33117 samples) + 90.0th: 9520 (38746 samples) + * 99.0th: 9584 (2895 samples) + 99.9th: 9648 (690 samples) + min=6720, max=10128 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9936 (3 samples) + * 50.0th: 9968 (8 samples) + 90.0th: 9968 (0 samples) + min=9884, max=9966 +current rps: 9954 +Wakeup Latencies percentiles (usec) runtime 10 (s) (98532 total samples) + 50.0th: 112 (29900 samples) + 90.0th: 144 (39508 samples) + * 99.0th: 150 (8369 samples) + 99.9th: 152 (847 samples) + min=1, max=246 +Request Latencies percentiles (usec) runtime 10 (s) (99668 total samples) + 50.0th: 9360 (33168 samples) + 90.0th: 9520 (38762 samples) + * 99.0th: 9584 (2898 samples) + 99.9th: 9648 (690 samples) + min=3377, max=10128 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9936 (3 samples) + * 50.0th: 9968 (8 samples) + 90.0th: 9968 (0 samples) + min=9884, max=9966 +average rps: 9967 diff --git a/paper_results/schbench/skyloft_rr50us/96.txt b/paper_results/schbench/skyloft_rr50us/96.txt new file mode 100644 index 0000000..66962e9 --- /dev/null +++ b/paper_results/schbench/skyloft_rr50us/96.txt @@ -0,0 +1,17 @@ +Wakeup Latencies percentiles (usec) runtime 10 (s) (99099 total samples) + 50.0th: 125 (28916 samples) + 90.0th: 147 (39168 samples) + * 99.0th: 151 (9372 samples) + 99.9th: 154 (348 samples) + min=1, max=328 +Request Latencies percentiles (usec) runtime 10 (s) (99173 total samples) + 50.0th: 9456 (30403 samples) + 90.0th: 9584 (38629 samples) + * 99.0th: 9680 (6002 samples) + 99.9th: 10000 (394 samples) + min=4106, max=16154 +RPS percentiles (requests) runtime 10 (s) (11 total samples) + 20.0th: 9872 (3 samples) + * 50.0th: 9904 (8 samples) + 90.0th: 9904 (0 samples) + min=9867, max=9909 diff --git a/paper_results/schbench/skyloft_rr50us/all.csv b/paper_results/schbench/skyloft_rr50us/all.csv new file mode 100644 index 0000000..9edb944 --- /dev/null +++ b/paper_results/schbench/skyloft_rr50us/all.csv @@ -0,0 +1,24 @@ +cores,wake99,rps50,lat99 +4,2,1170,2652 +8,2,3060,2652 +16,2,5464,2644 +24,3,8848,2676 +32,49,9456,5000 +40,50,9712,4824 +48,51,9872,5224 +56,98,9936,7208 +64,99,9872,7272 +72,101,9904,7432 +80,151,9904,9616 +88,150,9968,9584 +96,151,9904,9680 +112,201,9904,12048 +128,250,9904,14416 +144,252,9904,14480 +160,301,9904,16864 +176,351,9904,19168 +192,352,9904,19296 +208,401,9072,21600 +224,450,9904,23968 +240,452,9872,24032 +256,502,9872,26400 diff --git a/paper_results/synthetic/99.5-4-0.5-10000-lcbe/cfs-be b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/cfs-be new file mode 100644 index 0000000..36b22cf --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/cfs-be @@ -0,0 +1,9 @@ +30000,0,58925687,0.0117664,1,1843784573,0.36817,2,2637334284,0.526627,3,3395407049,0.678,4,4111396442,0.82097,5,4584692889,0.915478,6,4827210818,0.963905,7,4951190324,0.988661,8,5005846412,0.999575,9,5007666577,0.999938,10,5007935976,0.999992,11,5007871874,0.999979,12,5007902698,0.999986,13,5007972151,0.999999,14,5007954602,0.999996,15,5007889280,0.999983,16,5007902577,0.999986,17,5007959341,0.999997,18,5007941087,0.999993,19,5007906332,0.999986,Total,86502690973,0.863649 
+70000,0,118498877,0.0234199,1,1151036934,0.227489,2,1420036071,0.280653,3,1784665278,0.352718,4,2232068435,0.441142,5,2839503782,0.561194,6,3346386946,0.661373,7,3918764774,0.774497,8,4280640241,0.846017,9,4612215298,0.911549,10,4884203710,0.965304,11,4998381466,0.98787,12,5036287594,0.995362,13,5028912930,0.993905,14,5059404668,0.999931,15,5059664004,0.999982,16,5059622144,0.999974,17,5059641522,0.999978,18,5059686539,0.999987,19,5049671018,0.998007,Total,75999292231,0.751018 +110000,0,195186794,0.0379011,1,980594981,0.190411,2,1102973165,0.214174,3,1227792087,0.238411,4,1481689844,0.287713,5,1793477313,0.348255,6,2072433651,0.402423,7,2547442116,0.494659,8,2968069997,0.576336,9,3576838269,0.694546,10,3948648363,0.766744,11,4438106414,0.861786,12,4573505137,0.888078,13,4825303861,0.936972,14,4989460583,0.968848,15,5078270846,0.986093,16,5125803365,0.995322,17,5128459601,0.995838,18,5139203913,0.997925,19,5139334961,0.99795,Total,66332595261,0.644019 +150000,0,246875050,0.0474399,1,865797443,0.166373,2,944595578,0.181515,3,1025341784,0.197031,4,1144044750,0.219842,5,1312137248,0.252142,6,1565901568,0.300906,7,1716148986,0.329778,8,2138008303,0.410843,9,2398780171,0.460954,10,2790022950,0.536135,11,3259716057,0.626393,12,3591270639,0.690105,13,4065454122,0.781224,14,4427748129,0.850844,15,4732530707,0.909411,16,4982398957,0.957426,17,5033357959,0.967219,18,5141551213,0.988009,19,5169391727,0.993359,Total,56551073341,0.543347 +190000,0,350054316,0.0661046,1,902536452,0.170436,2,909940357,0.171834,3,972967829,0.183736,4,1050875257,0.198448,5,1118758024,0.211267,6,1286360601,0.242918,7,1442190572,0.272345,8,1542012075,0.291195,9,1852230954,0.349777,10,2110103955,0.398474,11,2300360347,0.434403,12,2714380073,0.512586,13,3046699490,0.575342,14,3372666445,0.636898,15,3821261303,0.721611,16,4122373500,0.778473,17,4450537296,0.840444,18,4646013399,0.877358,19,4899524725,0.925232,Total,46911846970,0.442944 +230000,0,403345371,0.0762493,1,835680706,0.157979,2,847940058,0.160296,3,876348451,0.165667,4,911762137,0.172361,5,976547653,0.184609,6,1065539639,0.201432,7,1055824076,0.199595,8,1262074000,0.238585,9,1325840837,0.25064,10,1534047324,0.29,11,1659958580,0.313802,12,1868156761,0.35316,13,2064712973,0.390318,14,2404153407,0.454486,15,2590909047,0.489791,16,2967922653,0.561063,17,3457422248,0.653599,18,3786301947,0.715771,19,4007668462,0.757618,Total,35902156330,0.339351 +270000,0,455943508,0.0845561,1,753821837,0.139799,2,806241052,0.14952,3,795598861,0.147546,4,844490202,0.156613,5,880991874,0.163383,6,915928079,0.169862,7,920117274,0.170639,8,986656120,0.182978,9,1049000234,0.19454,10,1116062225,0.206977,11,1224720394,0.227128,12,1340703585,0.248638,13,1413772764,0.262189,14,1617179411,0.299911,15,1792989422,0.332515,16,2016918895,0.374044,17,2213857618,0.410567,18,2433114632,0.451229,19,2738258465,0.507819,Total,26316366452,0.244023 +280000,0,468902741,0.0871271,1,750159662,0.139388,2,741976632,0.137867,3,762386791,0.141659,4,802544137,0.149121,5,803280571,0.149258,6,835158660,0.155181,7,908424751,0.168795,8,925958347,0.172053,9,967159222,0.179708,10,1016760063,0.188925,11,1104178130,0.205168,12,1235582565,0.229584,13,1274887816,0.236888,14,1328471622,0.246844,15,1577462499,0.293109,16,1641013317,0.304918,17,1912308567,0.355327,18,2179174771,0.404914,19,2223035071,0.413063,Total,23458825935,0.217945 
+290000,0,481101329,0.0896632,1,753135740,0.140362,2,770882097,0.14367,3,743262596,0.138522,4,809352697,0.15084,5,804489184,0.149933,6,827876230,0.154292,7,889307565,0.165741,8,921273704,0.171698,9,908014901,0.169227,10,995712207,0.185572,11,1072058073,0.1998,12,1171085565,0.218256,13,1263181292,0.23542,14,1335160030,0.248835,15,1476847941,0.275241,16,1650480529,0.307601,17,1718819543,0.320338,18,1931101694,0.359901,19,2163762846,0.403262,Total,22686905763,0.211409 diff --git a/paper_results/synthetic/99.5-4-0.5-10000-lcbe/cfs-lc b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/cfs-lc new file mode 100644 index 0000000..9fd370a --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/cfs-lc @@ -0,0 +1,8 @@ +89643,29904,6510,10602,14845,157697,10006771,10022056 +299277,99835,5747,10685,17498,29256,10007973,10034959 +419612,139981,5588,10680,19730,38511,10008630,10027583 +540137,180182,5427,10673,28780,1746200,10008892,11639482 +567938,189469,5436,10745,500069,10005116,10009057,12083004 +598295,199589,5358,10784,326013,2594736,10009128,11857211 +628683,209723,5442,10951,1036387,10005262,10009689,12463710 +636852,212455,5430,11040,1826887,10001999,10009927,13859711 diff --git a/paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-20us-be b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-20us-be new file mode 100644 index 0000000..ca4ec9f --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-20us-be @@ -0,0 +1,42 @@ +10000,0,4556336029,0.911262,1,4722089070,0.944412,2,4774618810,0.954918,3,4472584813,0.894512,4,4678884783,0.935771,5,4515035959,0.903002,6,4454348774,0.890864,7,4606348701,0.921264,8,4622089399,0.924412,9,4643093374,0.928613,10,4533728152,0.90674,11,4697899220,0.939574,12,4632404939,0.926475,13,4515574492,0.903109,14,4620847287,0.924164,15,4497093898,0.899413,16,4585444256,0.917083,17,4588869367,0.917768,18,4519860114,0.903967,19,4510740015,0.902143,Total,91747891452,0.917473 +20000,0,4373664469,0.874728,1,4373912977,0.874778,2,4423492929,0.884694,3,4440797654,0.888155,4,4427859353,0.885567,5,4410316419,0.882059,6,4363154508,0.872626,7,4420552972,0.884106,8,4428159663,0.885627,9,4359871697,0.87197,10,4421137999,0.884223,11,4410521146,0.882099,12,4435317608,0.887059,13,4412609989,0.882517,14,4340756079,0.868147,15,4431946085,0.886384,16,4382076271,0.876411,17,4457898149,0.891575,18,4375409026,0.875077,19,4438135030,0.887622,Total,88127590023,0.881271 +30000,0,4275292348,0.855055,1,4216978900,0.843392,2,4242850562,0.848566,3,4201143920,0.840225,4,4206823361,0.841361,5,4218152157,0.843627,6,4238838124,0.847764,7,4192816472,0.838559,8,4237933276,0.847583,9,4255893532,0.851175,10,4205089345,0.841014,11,4217597763,0.843516,12,4254114705,0.850819,13,4206945822,0.841385,14,4201917968,0.84038,15,4215101944,0.843017,16,4193459812,0.838688,17,4233044224,0.846605,18,4217021862,0.843401,19,4228077222,0.845612,Total,84459093319,0.844587 +40000,0,4040736991,0.808143,1,3997064428,0.799408,2,4017864071,0.803568,3,4007375346,0.801471,4,4006003914,0.801196,5,4010250264,0.802046,6,4008573988,0.80171,7,3984245460,0.796845,8,4036205339,0.807237,9,4015859932,0.803168,10,4026925030,0.805381,11,3989615620,0.797919,12,4041829862,0.808362,13,4006140140,0.801224,14,4016929574,0.803381,15,4036906550,0.807377,16,4001687143,0.800333,17,3998955106,0.799787,18,4032144048,0.806424,19,4044472188,0.80889,Total,80319784994,0.803193 
+50000,0,3848445670,0.769685,1,3833984826,0.766793,2,3854072412,0.77081,3,3823491706,0.764694,4,3821523445,0.7643,5,3850687372,0.770133,6,3830948338,0.766185,7,3846081889,0.769212,8,3832115778,0.766419,9,3849458580,0.769887,10,3838186535,0.767633,11,3822394151,0.764475,12,3818462529,0.763688,13,3823154537,0.764627,14,3837295800,0.767455,15,3835377057,0.767071,16,3843208147,0.768637,17,3834321658,0.76686,18,3836375556,0.767271,19,3840823643,0.76816,Total,76720409629,0.7672 +60000,0,3676503369,0.735297,1,3672062244,0.734409,2,3674469637,0.73489,3,3674691178,0.734934,4,3674649046,0.734926,5,3666956367,0.733387,6,3691145598,0.738225,7,3691132753,0.738223,8,3672188076,0.734434,9,3690039711,0.738004,10,3690478791,0.738092,11,3660878160,0.732172,12,3697224081,0.739441,13,3661139015,0.732224,14,3687850143,0.737566,15,3693807211,0.738758,16,3701801616,0.740356,17,3670898274,0.734176,18,3693350468,0.738666,19,3688939227,0.737784,Total,73630204965,0.736298 +70000,0,3474773425,0.694951,1,3480281588,0.696052,2,3475628110,0.695122,3,3465124312,0.693021,4,3481035093,0.696203,5,3474697885,0.694936,6,3492768583,0.69855,7,3490499746,0.698096,8,3475538132,0.695104,9,3487790029,0.697554,10,3476909938,0.695378,11,3467322180,0.69346,12,3479488911,0.695894,13,3456894699,0.691375,14,3466280779,0.693252,15,3477037596,0.695403,16,3469868405,0.69397,17,3484986776,0.696993,18,3458189195,0.691634,19,3482250501,0.696446,Total,69517365883,0.69517 +80000,0,3320542163,0.664105,1,3310053644,0.662007,2,3320045344,0.664006,3,3299863504,0.659969,4,3333500697,0.666697,5,3355887310,0.671174,6,3328471392,0.665691,7,3323226185,0.664642,8,3332034749,0.666404,9,3348284434,0.669653,10,3331874895,0.666372,11,3323946413,0.664786,12,3325425614,0.665082,13,3305166136,0.66103,14,3326277814,0.665252,15,3341803848,0.668357,16,3318158123,0.663628,17,3331518163,0.6663,18,3339079177,0.667812,19,3312108547,0.662418,Total,66527268152,0.665269 +90000,0,3174311321,0.63486,1,3157445220,0.631487,2,3166481293,0.633294,3,3166484952,0.633295,4,3188925415,0.637783,5,3120865506,0.624171,6,3179813965,0.635961,7,3187799712,0.637558,8,3180760415,0.63615,9,3185616208,0.637121,10,3168416407,0.633681,11,3125489209,0.625096,12,3184457326,0.636889,13,3167133454,0.633424,14,3167882522,0.633574,15,3173561512,0.63471,16,3148927673,0.629783,17,3182120907,0.636422,18,3143680290,0.628734,19,3150428577,0.630083,Total,63320601884,0.633204 +100000,0,2977878068,0.595573,1,2920595191,0.584116,2,2905794481,0.581156,3,2975058877,0.595009,4,2958254525,0.591648,5,2933532511,0.586704,6,2974524181,0.594902,7,2942451878,0.588487,8,2938747639,0.587747,9,2883892324,0.576776,10,2930091593,0.586015,11,2972076243,0.594412,12,2959109333,0.591819,13,2950490255,0.590095,14,2940962654,0.58819,15,2949350324,0.589867,16,2951394970,0.590276,17,2974579394,0.594913,18,2958704648,0.591738,19,2964651576,0.592927,Total,58962140665,0.589618 +110000,0,2778040179,0.555605,1,2804717228,0.560941,2,2862174487,0.572432,3,2830814306,0.56616,4,2780636468,0.556124,5,2809056904,0.561808,6,2850843712,0.570166,7,2837946377,0.567586,8,2818740233,0.563745,9,2822351237,0.564467,10,2822828580,0.564563,11,2808523718,0.561702,12,2789266430,0.55785,13,2813352215,0.562667,14,2830143878,0.566026,15,2808202881,0.561638,16,2738020200,0.547601,17,2850646554,0.570126,18,2736741268,0.547345,19,2772805737,0.554558,Total,56165852592,0.561656 
+120000,0,2599646376,0.519926,1,2686754874,0.537348,2,2635540241,0.527105,3,2609385574,0.521874,4,2721098561,0.544217,5,2562759671,0.512549,6,2688850090,0.537767,7,2617350485,0.523467,8,2599095539,0.519816,9,2602763198,0.52055,10,2645827550,0.529162,11,2617265821,0.52345,12,2659231137,0.531843,13,2566864890,0.51337,14,2664377989,0.532873,15,2612773506,0.522552,16,2705591641,0.541115,17,2644532913,0.528904,18,2570514244,0.5141,19,2614039318,0.522805,Total,52624263618,0.52624 +130000,0,2488838153,0.497765,1,2511195974,0.502237,2,2513331153,0.502664,3,2406709719,0.481339,4,2390815516,0.478161,5,2420291501,0.484056,6,2452282327,0.490454,7,2500639716,0.500125,8,2466554209,0.493308,9,2464194965,0.492836,10,2480441244,0.496086,11,2497916560,0.499581,12,2486717724,0.497341,13,2435482870,0.487094,14,2459899445,0.491977,15,2528608411,0.505719,16,2522989975,0.504595,17,2514967510,0.502991,18,2456980529,0.491394,19,2537314577,0.50746,Total,49536172078,0.495359 +140000,0,2206978329,0.441393,1,2328540745,0.465706,2,2268273089,0.453652,3,2265106605,0.453019,4,2206536619,0.441305,5,2305765295,0.461151,6,2337505806,0.467499,7,2259527149,0.451903,8,2319292038,0.463856,9,2329408049,0.465879,10,2245317626,0.449061,11,2248125918,0.449623,12,2317175922,0.463433,13,2285667548,0.457131,14,2329748490,0.465947,15,2224796278,0.444957,16,2245659723,0.44913,17,2231860485,0.44637,18,2352359148,0.470469,19,2197297211,0.439457,Total,45504942073,0.455047 +150000,0,2145248419,0.429047,1,2134195200,0.426837,2,2120428685,0.424084,3,2056211173,0.41124,4,2112941090,0.422586,5,2157743072,0.431546,6,2080175085,0.416033,7,2129400524,0.425878,8,2092724381,0.418543,9,2256622370,0.451322,10,2165639441,0.433126,11,2145312402,0.42906,12,2156839074,0.431366,13,2139733810,0.427945,14,2065829190,0.413164,15,2197548681,0.439507,16,2239613695,0.44792,17,2087434822,0.417485,18,2043919394,0.408782,19,2064207359,0.412839,Total,42591767867,0.425915 +160000,0,1968680353,0.393734,1,1920081809,0.384015,2,1911517637,0.382302,3,2022488474,0.404496,4,2021879394,0.404374,5,1973754711,0.394749,6,1991325550,0.398263,7,2093899139,0.418778,8,1858416808,0.371682,9,1962525533,0.392503,10,2095593855,0.419117,11,1991306553,0.39826,12,1973694304,0.394737,13,1984084204,0.396815,14,2035563683,0.407111,15,1969696235,0.393938,16,1978837190,0.395766,17,1933445331,0.386687,18,2023082354,0.404615,19,1991251428,0.398249,Total,39701124545,0.39701 +170000,0,1873371291,0.374673,1,1835275804,0.367054,2,1787435709,0.357486,3,1751764510,0.350351,4,1896600230,0.379319,5,1839691985,0.367937,6,1818256620,0.36365,7,1839255216,0.36785,8,1755146053,0.351028,9,1901585789,0.380316,10,1748212805,0.349641,11,1836488879,0.367296,12,1770498841,0.354098,13,1844619095,0.368922,14,1866254069,0.373249,15,1913915395,0.382782,16,1851349134,0.370268,17,1809391235,0.361877,18,1916180525,0.383235,19,1856425947,0.371284,Total,36711719132,0.367116 +180000,0,1727430584,0.345484,1,1619848455,0.323968,2,1556339922,0.311266,3,1766427824,0.353284,4,1592179679,0.318434,5,1621996009,0.324398,6,1620530766,0.324105,7,1658347334,0.331668,8,1632067799,0.326412,9,1716220889,0.343243,10,1596505113,0.319299,11,1666331278,0.333265,12,1682208840,0.33644,13,1687191578,0.337437,14,1705905493,0.341179,15,1649606874,0.32992,16,1636921671,0.327383,17,1713541033,0.342707,18,1704801261,0.340959,19,1770229841,0.354044,Total,33324632243,0.333245 
+190000,0,1606948804,0.321388,1,1449596329,0.289918,2,1645656182,0.329129,3,1617206706,0.323439,4,1504781297,0.300955,5,1438143249,0.287627,6,1560816172,0.312161,7,1484247402,0.296848,8,1541568007,0.308312,9,1552373376,0.310473,10,1376097707,0.275218,11,1539605547,0.307919,12,1529489086,0.305896,13,1482395498,0.296477,14,1590523803,0.318103,15,1552574063,0.310513,16,1575964074,0.315191,17,1442350265,0.288468,18,1429323775,0.285863,19,1537648230,0.307528,Total,30457309572,0.304571 +200000,0,1403687954,0.280736,1,1270259476,0.254051,2,1241913004,0.248382,3,1390806741,0.27816,4,1173614255,0.234722,5,1309209632,0.261841,6,1307343502,0.261468,7,1429464710,0.285892,8,1192984029,0.238596,9,1406119190,0.281223,10,1430189548,0.286037,11,1314036063,0.262806,12,1342780139,0.268555,13,1322036694,0.264406,14,1213485104,0.242696,15,1422551178,0.284509,16,1227529531,0.245505,17,1314412829,0.262881,18,1387711157,0.277541,19,1227822103,0.245563,Total,26327956839,0.263278 +210000,0,1271019344,0.254203,1,1182285477,0.236456,2,1345331076,0.269065,3,1125485459,0.225096,4,1009569076,0.201913,5,1294331867,0.258865,6,1171630756,0.234325,7,1069195919,0.213838,8,1286243240,0.257248,9,1234155981,0.24683,10,1169656080,0.23393,11,1177159847,0.235431,12,1186766886,0.237352,13,1306903297,0.261379,14,1386356971,0.27727,15,1208749576,0.241749,16,1122755505,0.22455,17,1223144309,0.244628,18,1059880080,0.211975,19,1225249115,0.245049,Total,24055869861,0.240558 +220000,0,1177617485,0.235522,1,1035185006,0.207036,2,1063202823,0.21264,3,1100171696,0.220033,4,988604736,0.19772,5,1040939233,0.208187,6,1107953471,0.22159,7,1023693528,0.204738,8,1170861746,0.234171,9,1102641054,0.220527,10,1208015291,0.241602,11,928310514,0.185661,12,1116031873,0.223205,13,915028466,0.183005,14,1105490329,0.221097,15,1018216660,0.203642,16,1041591049,0.208317,17,1078747003,0.215748,18,1044081183,0.208815,19,1034189398,0.206837,Total,21300572544,0.213005 +230000,0,860420470,0.172083,1,702967466,0.140593,2,909181059,0.181835,3,869942823,0.173988,4,619811598,0.123962,5,891748600,0.178349,6,985301235,0.197059,7,959330794,0.191865,8,926378201,0.185275,9,730215164,0.146042,10,914573327,0.182914,11,898333740,0.179666,12,877245992,0.175448,13,762549397,0.152509,14,752636111,0.150526,15,768377770,0.153675,16,859148769,0.171829,17,897435513,0.179486,18,924516839,0.184902,19,898414429,0.179682,Total,17008529297,0.170084 +240000,0,597884429,0.119576,1,697850577,0.139569,2,530692174,0.106138,3,911091789,0.182217,4,685039310,0.137007,5,680441770,0.136088,6,766530903,0.153305,7,844725399,0.168944,8,835687676,0.167137,9,568003594,0.1136,10,870822299,0.174164,11,652640267,0.130527,12,546030296,0.109205,13,835820719,0.167163,14,723690037,0.144737,15,760683993,0.152136,16,694257372,0.138851,17,845226337,0.169044,18,785932594,0.157186,19,421510654,0.0843017,Total,14254562189,0.142545 +250000,0,496777112,0.0993549,1,605555860,0.121111,2,570038054,0.114007,3,567223284,0.113444,4,542728793,0.108545,5,395772534,0.0791541,6,541243704,0.108248,7,348127079,0.0696251,8,598673831,0.119734,9,526958225,0.105391,10,547426171,0.109485,11,605975727,0.121195,12,582061391,0.116412,13,497794527,0.0995584,14,473221994,0.0946439,15,519953242,0.10399,16,661112300,0.132222,17,558870621,0.111774,18,508844452,0.101768,19,670422170,0.134084,Total,10818781071,0.108187 
+252500,0,463834160,0.0927664,1,521826591,0.104365,2,703189421,0.140637,3,674469686,0.134893,4,618779847,0.123755,5,656295269,0.131258,6,597227697,0.119445,7,669530959,0.133906,8,436029059,0.0872054,9,629610623,0.125922,10,448134015,0.0896264,11,513938570,0.102787,12,589569340,0.117913,13,425466241,0.0850928,14,594852070,0.11897,15,574697467,0.114939,16,473703127,0.0947402,17,604636945,0.120927,18,481828893,0.0963653,19,320292478,0.0640582,Total,10997912458,0.109979 +255000,0,601667861,0.120333,1,404336182,0.0808669,2,527827771,0.105565,3,495260210,0.0990517,4,472123908,0.0944244,5,538305904,0.107661,6,526062565,0.105212,7,405985674,0.0811968,8,436148571,0.0872294,9,528865634,0.105773,10,552751418,0.11055,11,495381206,0.0990759,12,410502261,0.0821001,13,536522579,0.107304,14,422630398,0.0845258,15,532891994,0.106578,16,406242295,0.0812481,17,527160886,0.105432,18,582234004,0.116446,19,333031340,0.066606,Total,9735932661,0.0973589 +257500,0,405419173,0.0810835,1,454803940,0.0909605,2,437741390,0.087548,3,428918988,0.0857835,4,364349325,0.0728696,5,421935725,0.0843868,6,502523843,0.100504,7,426529142,0.0853055,8,388913085,0.0777823,9,522151967,0.10443,10,452539850,0.0905076,11,327889397,0.0655776,12,241375864,0.048275,13,481613201,0.0963223,14,262834584,0.0525667,15,229932031,0.0459862,16,423055386,0.0846108,17,439935993,0.0879869,18,529344176,0.105868,19,364650709,0.0729299,Total,8106457769,0.0810643 +260000,0,390592484,0.0781181,1,454715542,0.0909427,2,533949017,0.106789,3,303883610,0.0607764,4,365288135,0.0730573,5,437738623,0.0875473,6,466882235,0.093376,7,513197367,0.102639,8,344172855,0.0688342,9,543519718,0.108703,10,501625870,0.100325,11,451235419,0.0902466,12,395704005,0.0791404,13,395979266,0.0791955,14,387327298,0.0774651,15,521491573,0.104298,16,476139103,0.0952274,17,375209450,0.0750415,18,518566727,0.103713,19,395221632,0.0790439,Total,8772439929,0.087724 +262500,0,283647396,0.0567292,1,228255093,0.0456508,2,259469784,0.0518937,3,362023336,0.0724043,4,298989532,0.0597976,5,243783003,0.0487563,6,414437192,0.082887,7,325440352,0.0650877,8,304586521,0.060917,9,310672045,0.0621341,10,247705418,0.0495408,11,318988906,0.0637974,12,211218588,0.0422435,13,362404047,0.0724804,14,350757725,0.0701512,15,264767157,0.0529531,16,280343106,0.0560683,17,263946533,0.052789,18,103152535,0.0206304,19,192900550,0.0385799,Total,5627488819,0.0562746 +265000,0,332779762,0.0665557,1,244639872,0.0489278,2,312476656,0.0624951,3,246559186,0.0493117,4,280086506,0.0560171,5,257332340,0.0514663,6,272537335,0.0545073,7,239321099,0.047864,8,229989577,0.0459977,9,321521019,0.064304,10,240329723,0.0480658,11,222488035,0.0444974,12,270426523,0.0540851,13,158921744,0.0317842,14,304015079,0.0608028,15,227822840,0.0455644,16,332085050,0.0664168,17,226588098,0.0453174,18,81420164,0.016284,19,219466753,0.0438932,Total,5020807361,0.0502079 +267500,0,266865368,0.0533729,1,293058289,0.0586114,2,224556491,0.0449111,3,291918308,0.0583834,4,133814129,0.0267627,5,327581046,0.065516,6,274650421,0.0549299,7,308433063,0.0616864,8,240381281,0.0480761,9,288637341,0.0577272,10,297349928,0.0594698,11,300760683,0.0601519,12,304969417,0.0609937,13,270500350,0.0540999,14,129480777,0.0258961,15,317219292,0.0634436,16,376945657,0.0753888,17,202701420,0.0405401,18,222939519,0.0445877,19,234953988,0.0469906,Total,5307716768,0.053077 
+270000,0,199850451,0.0399699,1,150381072,0.0300761,2,216151812,0.0432302,3,220364047,0.0440726,4,214077039,0.0428152,5,196784683,0.0393568,6,185496911,0.0370992,7,307467989,0.0614934,8,270693002,0.0541384,9,231811161,0.046362,10,147726012,0.0295451,11,137097594,0.0274194,12,275313667,0.0550625,13,221492780,0.0442984,14,169090151,0.0338179,15,139288925,0.0278577,16,79741493,0.0159482,17,167104099,0.0334207,18,205296653,0.0410592,19,136992343,0.0273984,Total,3872221884,0.0387221 +272500,0,166698645,0.0333396,1,198744671,0.0397487,2,199342196,0.0398682,3,193054371,0.0386107,4,215925216,0.0431848,5,162869606,0.0325738,6,194027433,0.0388053,7,228178633,0.0456355,8,23878512,0.00477568,9,183079161,0.0366157,10,135936723,0.0271872,11,48202890,0.00964053,12,182043176,0.0364085,13,166450204,0.0332899,14,116595734,0.023319,15,2858932,0.000571784,16,132961693,0.0265922,17,118884581,0.0237768,18,145042092,0.0290083,19,28483556,0.00569668,Total,2843258025,0.0284324 +275000,0,148118922,0.0296236,1,112080386,0.022416,2,183982972,0.0367964,3,114236969,0.0228473,4,147147685,0.0294294,5,79339700,0.0158679,6,75318857,0.0150637,7,62013244,0.0124026,8,105011133,0.0210021,9,138425087,0.0276849,10,107417744,0.0214834,11,142647679,0.0285294,12,161786311,0.0323571,13,134558488,0.0269115,14,172230848,0.034446,15,152524381,0.0305047,16,102477966,0.0204955,17,109650901,0.0219301,18,0,0,19,0,0,Total,2248969273,0.0224896 +277500,0,28275038,0.00565498,1,61135145,0.012227,2,35854880,0.00717094,3,34978024,0.00699557,4,32459297,0.00649183,5,50006592,0.0100013,6,46371792,0.00927431,7,56143026,0.0112285,8,46707160,0.00934138,9,39832418,0.00796644,10,67183371,0.0134366,11,72659562,0.0145318,12,32957056,0.00659138,13,50654403,0.0101308,14,56021874,0.0112043,15,85403161,0.0170805,16,40696752,0.00813931,17,66860733,0.0133721,18,42180231,0.008436,19,38216666,0.00764329,Total,984597181,0.00984592 +280000,0,77739019,0.0155477,1,118602376,0.0237204,2,55281808,0.0110563,3,71760546,0.0143521,4,94227555,0.0188454,5,52084713,0.0104169,6,108056432,0.0216112,7,111349187,0.0222697,8,102301774,0.0204603,9,47890621,0.00957809,10,104461913,0.0208923,11,112584329,0.0225168,12,95265957,0.0190531,13,107538898,0.0215077,14,100798351,0.0201596,15,98610173,0.019722,16,43901371,0.00878024,17,60438797,0.0120877,18,61275300,0.012255,19,37743453,0.00754866,Total,1661912573,0.0166191 +282500,0,74661114,0.0149322,1,79878152,0.0159756,2,108885351,0.021777,3,150052343,0.0300104,4,73136700,0.0146273,5,123706118,0.0247411,6,110706607,0.0221412,7,113988825,0.0227977,8,129753341,0.0259506,9,83025815,0.0166051,10,99008863,0.0198017,11,86242262,0.0172484,12,101336618,0.0202672,13,167907008,0.0335813,14,110056295,0.0220112,15,46419159,0.0092838,16,67101547,0.0134203,17,71478717,0.0142957,18,102333389,0.0204666,19,77301473,0.0154602,Total,1976979697,0.0197697 +285000,0,29104315,0.00582084,1,20077201,0.00401542,2,10315995,0.00206319,3,30234001,0.00604678,4,25865115,0.005173,5,17166359,0.00343326,6,28731719,0.00574632,7,7576618,0.00151532,8,23392187,0.00467842,9,19910382,0.00398206,10,22453890,0.00449076,11,16257743,0.00325154,12,3238458,0.000647689,13,14000051,0.0028,14,15219735,0.00304394,15,8142646,0.00162852,16,0,0,17,0,0,18,0,0,19,0,0,Total,291686415,0.00291685 
+287500,0,7761039,0.0015522,1,593276,0.000118654,2,18182421,0.00363646,3,14586170,0.00291722,4,6563427,0.00131268,5,19861057,0.00397219,6,23481145,0.0046962,7,16775242,0.00335503,8,3482741,0.000696544,9,16432808,0.00328654,10,18541371,0.00370825,11,12150748,0.00243013,12,6466605,0.00129331,13,8000919,0.00160017,14,10525120,0.00210501,15,15172214,0.00303442,16,163004,3.26006e-05,17,2077314,0.00041546,18,767583,0.000153516,19,0,0,Total,201584204,0.00201583 +290000,0,4757712,0.000951537,1,6919036,0.0013838,2,3702336,0.000740463,3,3659089,0.000731814,4,2920001,0.000583997,5,5567550,0.0011135,6,4249537,0.000849903,7,3598349,0.000719666,8,1218991,0.000243797,9,0,0,10,0,0,11,0,0,12,0,0,13,0,0,14,0,0,15,0,0,16,0,0,17,0,0,18,0,0,19,0,0,Total,36592601,0.000365924 +292500,0,70704,1.41407e-05,1,836523,0.000167304,2,432385,8.64767e-05,3,204344,4.08686e-05,4,0,0,5,0,0,6,0,0,7,0,0,8,0,0,9,0,0,10,0,0,11,0,0,12,0,0,13,0,0,14,0,0,15,0,0,16,0,0,17,0,0,18,0,0,19,0,0,Total,1543956,1.54395e-05 diff --git a/paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-20us-lc b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-20us-lc new file mode 100644 index 0000000..4bedc28 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-20us-lc @@ -0,0 +1,42 @@ +29847,9948,4960,16035,30539,10025144,10230317,10702867 +59820,19939,4903,17458,34670,42602,10564537,11282585 +89669,29889,4871,18847,39686,76856,10938889,11859919 +119932,39976,4893,19220,42943,90064,11232528,12674945 +150139,50045,4855,21279,46304,10058532,11426042,12473754 +180407,60134,4868,21625,47659,60643,11515988,12625001 +209323,69773,4861,21878,49835,10042137,11778567,13064944 +240286,80094,4865,22233,50817,10057230,11784328,13118509 +270634,90210,4859,21557,51442,62284,11929162,13274860 +300329,100108,4869,21230,52484,10044006,12025574,13106752 +328897,109630,4846,20424,52402,10054220,11973202,13279695 +359877,119957,4840,20082,53038,156482,12016186,13205330 +390530,130175,4854,19554,53347,76143,12018494,13515685 +420738,140244,4848,18920,53156,10096730,11962995,13202987 +450647,150213,4864,18383,52319,10181986,11992635,13292932 +479942,159978,4850,18241,52257,252911,11981704,13279744 +510187,170060,4851,18049,51841,10125153,11926726,13032498 +540334,180109,4856,17871,51780,232818,11960654,13165922 +571316,190436,4822,17547,51366,263618,11997213,13411915 +600497,200163,4866,17330,51380,10234113,12101310,17584143 +629405,209799,4855,17208,51328,10001720,12153790,15898925 +659785,219925,4846,17000,51017,204701,12260695,20857045 +690030,230007,4821,16554,53320,1398744,12720367,35335289 +719853,239948,4843,16200,54906,10258129,13821560,32131329 +750635,250208,4863,15151,61915,10451041,17546717,183184048 +757266,252418,4834,15077,62646,10093883,18834118,114404179 +765457,255149,4850,15248,57742,1736001,15303821,86283787 +771923,257304,4845,14409,62729,10170140,17720099,173790358 +780433,260141,4849,14214,64706,10185264,18533935,194278556 +787350,262446,4824,6825,67230,10552079,19116689,110541270 +795482,265157,4849,6165,74663,10995752,20670734,159470939 +803266,267752,4843,6184,75860,10811335,20025808,206469338 +809717,269902,4832,6147,77064,10704412,20801823,197491658 +817122,272370,4840,6034,81535,11109823,21712700,266281871 +824946,274978,4837,5949,129884,11753595,25120636,804243547 +833097,277695,4816,5917,142323,12223174,26194524,1437879015 +840362,280117,4857,5919,133762,11643518,26919994,813119486 +848094,282694,4843,5880,3520236,15725318,27224899,840662508 +854393,284794,4842,5781,1860384,17009155,28874464,965724077 
+862763,287583,4807,5772,1788857,18098018,32215155,1933080554 +870426,290138,4848,5756,1620823,17891225,32610868,2601059629 +878425,292804,4841,5790,3414663,17324787,30940981,2771815531 \ No newline at end of file diff --git a/paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-30us-be b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-30us-be new file mode 100644 index 0000000..9f99972 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-30us-be @@ -0,0 +1,10 @@ +30000,0,4193607471,0.838716,1,4233500787,0.846694,2,4222513613,0.844497,3,4213363459,0.842667,4,4212019515,0.842398,5,4221656194,0.844325,6,4205741445,0.841142,7,4209687945,0.841932,8,4200235628,0.840041,9,4205081083,0.84101,10,4244420887,0.848878,11,4247793880,0.849553,12,4213568893,0.842708,13,4205935199,0.841181,14,4220739077,0.844142,15,4197882030,0.83957,16,4205348406,0.841064,17,4220007291,0.843995,18,4202576190,0.840509,19,4213875965,0.842769,Total,84289554958,0.84289 +70000,0,3528014213,0.705598,1,3532787335,0.706553,2,3509358205,0.701867,3,3524520881,0.704899,4,3536654201,0.707326,5,3541442793,0.708284,6,3546985068,0.709392,7,3522435204,0.704482,8,3537196474,0.707435,9,3505908064,0.701177,10,3535496749,0.707095,11,3490011046,0.697998,12,3524219950,0.704839,13,3510153022,0.702026,14,3522289419,0.704453,15,3526797067,0.705355,16,3541121946,0.70822,17,3522709593,0.704537,18,3533539657,0.706703,19,3517529494,0.703501,Total,70509170381,0.705087 +110000,0,2813600943,0.562717,1,2811577036,0.562312,2,2867910168,0.573579,3,2863544299,0.572706,4,2739547858,0.547907,5,2804452014,0.560887,6,2892419026,0.578481,7,2792344377,0.558466,8,2803938051,0.560785,9,2816995741,0.563396,10,2839408092,0.567879,11,2816587909,0.563315,12,2831592626,0.566315,13,2865905225,0.573178,14,2919635888,0.583924,15,2800177969,0.560033,16,2850471250,0.570091,17,2836178380,0.567233,18,2893710763,0.578739,19,2839289415,0.567855,Total,56699287030,0.56699 +150000,0,2166779748,0.433353,1,2216912215,0.443379,2,2154817759,0.43096,3,2060037434,0.412005,4,2111156836,0.422228,5,2224151498,0.444827,6,2280698604,0.456136,7,2220382607,0.444073,8,2042330480,0.408463,9,2022109185,0.404419,10,2077221988,0.415441,11,2117320599,0.423461,12,2168773383,0.433752,13,2080373867,0.416072,14,2140048767,0.428007,15,2043035206,0.408604,16,2048152052,0.409628,17,2112566253,0.42251,18,2027591454,0.405515,19,2181000859,0.436197,Total,42495460794,0.424952 +190000,0,1711717298,0.342342,1,1626898934,0.325378,2,1454435677,0.290886,3,1352354751,0.27047,4,1640383107,0.328075,5,1694838295,0.338966,6,1476198246,0.295238,7,1539133308,0.307825,8,1522612611,0.304521,9,1420668328,0.284132,10,1491941419,0.298387,11,1434810777,0.286961,12,1504104611,0.300819,13,1575584186,0.315115,14,1647130462,0.329424,15,1410388459,0.282076,16,1557342064,0.311467,17,1474919372,0.294982,18,1505876789,0.301174,19,1421369453,0.284272,Total,30462708147,0.304626 +230000,0,976735396,0.195346,1,851848412,0.170369,2,956945826,0.191388,3,926755998,0.18535,4,930231968,0.186046,5,1085173896,0.217034,6,1013365100,0.202672,7,909080790,0.181815,8,862038278,0.172407,9,984003905,0.1968,10,898039347,0.179607,11,885769306,0.177153,12,1009505302,0.2019,13,837106601,0.167421,14,823310821,0.164661,15,929233299,0.185846,16,1078840307,0.215767,17,946339484,0.189267,18,726908588,0.145381,19,934266453,0.186853,Total,18565499077,0.185654 
+270000,0,221843960,0.0443686,1,226086658,0.0452172,2,271590382,0.0543179,3,223125929,0.044625,4,161403676,0.0322806,5,208330792,0.041666,6,181269512,0.0362538,7,207198922,0.0414396,8,186353063,0.0372705,9,126263355,0.0252526,10,93965616,0.0187931,11,230415345,0.0460829,12,154372977,0.0308745,13,232538604,0.0465075,14,218345201,0.0436689,15,262362024,0.0524722,16,256191192,0.051238,17,214759126,0.0429517,18,209970055,0.0419939,19,209859775,0.0419718,Total,4096246164,0.0409623 +280000,0,143870560,0.028774,1,58105141,0.011621,2,111989045,0.0223977,3,3997215,0.00079944,4,5226245,0.00104524,5,46326262,0.00926522,6,88227500,0.0176454,7,98402098,0.0196803,8,104460847,0.0208921,9,73424736,0.0146849,10,90676462,0.0181352,11,65232352,0.0130464,12,131074842,0.0262149,13,123396020,0.0246791,14,140016321,0.0280032,15,99810939,0.0199621,16,79676830,0.0159353,17,95524027,0.0191047,18,0,0,19,0,0,Total,1559437442,0.0155943 +290000,0,3600382,0.000720073,1,18500048,0.00369999,2,9184644,0.00183692,3,17548629,0.00350971,4,20295397,0.00405906,5,6380736,0.00127614,6,6433621,0.00128672,7,5676743,0.00113534,8,13569549,0.0027139,9,16560299,0.00331204,10,10568697,0.00211373,11,6404786,0.00128095,12,12910388,0.00258206,13,17345928,0.00346917,14,13720099,0.00274401,15,326140,6.52277e-05,16,13111331,0.00262225,17,10758957,0.00215178,18,0,0,19,0,0,Total,202896374,0.00202895 +292500,0,5063657,0.00101273,1,2999460,0.000599889,2,4612957,0.000922587,3,3876444,0.000775285,4,4889145,0.000977825,5,134464,2.68927e-05,6,805082,0.000161016,7,1898971,0.000379793,8,3453142,0.000690625,9,0,0,10,0,0,11,0,0,12,0,0,13,0,0,14,0,0,15,0,0,16,0,0,17,0,0,18,0,0,19,0,0,Total,27733322,0.000277332 diff --git a/paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-30us-lc b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-30us-lc new file mode 100644 index 0000000..84828cc --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/ghost-30us-lc @@ -0,0 +1,11 @@ +89348,29782,4870,19036,38115,10089170,10822564,11632355 +209792,69929,4871,21611,45018,57229,11326119,12167519 +330292,110095,4860,20221,48141,132118,11446556,12319191 +449461,149818,4858,18474,49126,10186390,11562125,12458289 +568774,189588,4840,17717,49808,10024441,11692614,14482103 +690752,230247,4841,16655,50633,10078264,12263168,41508750 +764789,254926,4844,15568,56307,4480747,14542408,122790750 +794823,264937,4843,14678,73010,5603814,17108049,143823903 +826682,275557,4821,6178,168980,11270347,20851512,425329298 +854349,284779,4834,5921,272681,13728761,27485406,978787171 +871430,290472,4836,6127,5178838,16630081,30128943,1545298186 \ No newline at end of file diff --git a/paper_results/synthetic/99.5-4-0.5-10000-lcbe/shinjuku-30us-be b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/shinjuku-30us-be new file mode 100644 index 0000000..1a36ee4 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/shinjuku-30us-be @@ -0,0 +1,12 @@ +29999,0 +69962,0 +109989,0 +150038,0 +190385,0 +230070,0 +270402,0 +310281,0 +320109,0 +329713,0 +340137,0 +349288,0 diff --git a/paper_results/synthetic/99.5-4-0.5-10000-lcbe/shinjuku-30us-lc b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/shinjuku-30us-lc new file mode 100644 index 0000000..700f6ec --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/shinjuku-30us-lc @@ -0,0 +1,13 @@ +30000,29999,4713,4787,4834,4891,6225,10610735,10626892,10650103 +70000,69962,4715,4789,4840,4910,5675,10611776,10628972,10646236 +110000,109989,4717,4795,4854,4945,6959,10608365,10625821,10932359 
+150000,150038,4721,4806,4875,4977,7673,10607314,10623252,10633899 +190000,190385,4727,4821,4901,5017,11197,10607467,10841680,11927851 +230000,230070,4728,4828,4928,5093,19019,10624469,11183465,12097218 +270000,270402,4748,4889,5200,14027,29739,11380347,15181591,18920945 +310000,310281,4795,9689,22223,31216,61107,16315424,23274043,28525285 +320000,320109,4804,12164,24606,45201,78337,17859524,30369548,32426632 +330000,329713,4949,35150,74658,110328,164398,37084518,59840553,60697124 +340000,340137,63468,126907,267910,433889,509132,116418830,172508779,173589330 +350000,349288,451411,653200,741644,790108,867630,262100710,286912079,289605853 +360000,360583,644822,1387142,1974583,2163931,2037185,2074412,713443270,716300623 \ No newline at end of file diff --git a/paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-20us-be b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-20us-be new file mode 100644 index 0000000..f994f37 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-20us-be @@ -0,0 +1,14 @@ +37055.500,0.8175929 +74066.625,0.6401557 +111176.500,0.5067477 +148244.500,0.3865992 +185561.750,0.2845704 +222145.375,0.1241204 +259327.250,0.0248369 +296465.750,0.0073026 +303930.875,0.0044186 +311269.750,0.0050615 +318766.125,0.0054401 +326062.750,0.0050095 +333297.375,0.0054341 +340601.625,0.0037325 \ No newline at end of file diff --git a/paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-20us-lc b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-20us-lc new file mode 100644 index 0000000..272483a --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-20us-lc @@ -0,0 +1,14 @@ +37050.760,37055.500,4848,5210,16648,24884,10513403,14956622 +74101.519,74066.625,4867,5275,18138,25701,10639993,15371107 +111152.279,111176.500,4805,5354,20719,30347,10853758,17893622 +148203.038,148244.500,4885,5521,20702,37950,11008996,20486281 +185253.798,185561.750,4888,5620,20621,27520,11286377,17748482 +222304.557,222145.375,4801,5786,21312,30506,11434056,18385003 +259355.317,259327.250,4784,6073,23289,32219,11922275,19921134 +296406.076,296177.125,4876,8663,63146,78568,14916199,37795916 +303816.228,303930.875,4822,9862,51390,72897,16081540,33322716 +311226.380,311269.750,4829,14460,97006,10976232,23175673,53224252 +318636.532,318766.125,4908,22101,144468,156664,32432034,75424665 +326046.684,326062.750,4854,124130,288789,467120,115983400,141606892 +333456.836,333297.375,326238,833078,1338227,1350747,523432829,636596381 +340866.988,340601.625,342723,1066433,1747358,1760285,691692754,831916975 diff --git a/paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-30us-be b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-30us-be new file mode 100644 index 0000000..dabb25d --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-30us-be @@ -0,0 +1,10 @@ +36943.125,0.7534810 +74134.375,0.6462380 +111033.750,0.5389577 +148254.500,0.4459542 +184971.375,0.2957168 +221936.500,0.2163761 +259488.000,0.0419616 +296341.250,0.0247683 +303628.500,0.0105062 +340919.500,0.0096001 diff --git a/paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-30us-lc b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-30us-lc new file mode 100644 index 0000000..126feb1 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000-lcbe/skyloft-30us-lc @@ -0,0 +1,15 @@ +37050.760,37140.250,4881,5215,27401,34833,10392441,16362324 +74101.519,74134.375,4893,5287,28842,10307427,10504530,17441960 +111152.279,111033.750,4816,5354,27391,35540,10501250,15885611 
+148203.038,148254.500,4846,5513,38585,10348064,11315048,24226441 +185253.798,184971.375,4820,5542,27195,10351396,10785446,15701510 +222304.557,221936.500,4838,5803,44252,10396713,11525428,23105232 +259355.317,259488.000,4898,5951,30583,40410,11223742,17004339 +296406.076,296341.250,4846,7840,65045,10622217,13725426,31215979 +303816.228,303628.500,4832,7687,50750,70189,13652481,24864438 +311226.380,311485.750,4861,12546,95744,10559824,16769388,40251373 +318636.532,319052.000,4792,16072,130151,144944,19430469,49238910 +326046.684,326203.500,4892,21999,130656,147082,22434259,50862083 +333456.836,333207.000,4853,28451,165454,182079,27998349,60792027 +340866.988,340919.500,4922,358341,826015,841782,243672158,270691341 +348277.140,347631.125,706645,1383522,2272386,2292099,607772711,705020737 diff --git a/paper_results/synthetic/99.5-4-0.5-10000/cfs b/paper_results/synthetic/99.5-4-0.5-10000/cfs new file mode 100644 index 0000000..9fd370a --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000/cfs @@ -0,0 +1,8 @@ +89643,29904,6510,10602,14845,157697,10006771,10022056 +299277,99835,5747,10685,17498,29256,10007973,10034959 +419612,139981,5588,10680,19730,38511,10008630,10027583 +540137,180182,5427,10673,28780,1746200,10008892,11639482 +567938,189469,5436,10745,500069,10005116,10009057,12083004 +598295,199589,5358,10784,326013,2594736,10009128,11857211 +628683,209723,5442,10951,1036387,10005262,10009689,12463710 +636852,212455,5430,11040,1826887,10001999,10009927,13859711 diff --git a/paper_results/synthetic/99.5-4-0.5-10000/ghost-100us b/paper_results/synthetic/99.5-4-0.5-10000/ghost-100us new file mode 100644 index 0000000..cbf4084 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000/ghost-100us @@ -0,0 +1,46 @@ +80395,10049,5104,12443,17862,10007832,10009341,10256123 +159725,19965,4900,12379,18284,24691,10009334,10259444 +239684,29960,4900,12452,19133,29843,10010112,10060379 +319846,39980,4921,12550,19970,10007516,10010809,10327675 +399007,49875,4906,12723,20492,76346,10011218,10195646 +480203,60025,4895,12982,21310,10001422,10011706,10302107 +560749,70093,4889,13579,22332,10005099,10012186,10355981 +640306,80037,4894,13967,22904,34701,10012453,10392064 +719905,89987,4880,14377,23704,39298,10012813,10360214 +799652,99956,4851,14716,24345,33282,10012997,10426238 +880238,110029,4855,15128,25510,10001321,10013791,10412767 +960913,120113,4874,15320,25898,10001340,10014145,10901528 +1041870,130233,4849,15694,27252,10001091,10014922,10363937 +1120928,140115,4857,15823,27545,55588,10015563,10307228 +1200696,150086,4866,16021,28258,176850,10017701,10815201 +1278070,159758,4863,16182,28749,123190,10022103,10704608 +1360517,170063,4850,16452,29909,179748,10037642,10935418 +1441371,180170,4854,16588,30661,10001274,10066258,11490765 +1519314,189913,4856,16728,32122,10001223,10126318,15298102 +1601258,200156,4847,16994,33520,220522,10181433,15876310 +1681688,210210,4847,17040,38550,10001222,10436953,20584566 +1760463,220056,4856,17152,45864,10001090,10604560,36887952 +1838619,229826,4840,17251,56115,10001567,10830948,64822189 +1922107,240262,4846,17352,70440,2105090,11186653,38818475 +1998078,249758,4836,17215,102768,4500558,11880915,72406645 +2020102,252511,4843,17192,109131,6923158,12085346,66951522 +2038787,254847,4825,17147,133775,10018737,12895426,168078616 +2061769,257719,4833,17120,150271,10024202,13165652,122485898 +2079914,259987,4837,17078,226322,10064894,14012871,191086966 +2100147,262517,4828,17111,160928,10024551,13065738,73291198 
+2123355,265418,4819,17043,193405,10020707,13687614,237586057 +2141516,267688,4839,16979,227851,10074668,14264333,176179018 +2160800,270098,4797,16866,254008,10116052,14665497,229713358 +2181096,272635,4828,16916,245342,10043190,14359610,168041767 +2199466,274932,4839,16739,314575,10205869,16069332,164035163 +2220033,277502,4795,16672,301466,10223151,15260014,349669252 +2242228,280276,4836,16596,323963,10162586,15796391,251580763 +2259230,282402,4824,16520,374490,10287839,16798037,277596305 +2277348,284667,4829,16359,403318,10313844,18043552,624579102 +857193,285727,4832,16440,380861,10221564,16694718,156461775 +860758,286915,4837,16215,447751,10563432,18527870,530195203 +862559,287516,4844,16239,400852,10437506,17949736,289780559 +867806,289264,4841,15645,570737,12327288,25321871,649108085 +870013,290000,4844,15919,528239,11019457,23588201,1357761040 +872443,290810,4838,15866,569184,12241095,26747595,757570537 +876043,292010,4839,16188,3235928,11431607,22988892,822651412 diff --git a/paper_results/synthetic/99.5-4-0.5-10000/ghost-10us b/paper_results/synthetic/99.5-4-0.5-10000/ghost-10us new file mode 100644 index 0000000..9d99638 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000/ghost-10us @@ -0,0 +1,17 @@ +149850,49949,4915,12731,20558,25073,10011140,10028825 +329830,109942,4853,15147,25495,132335,10013469,10775943 +480522,160171,4872,16284,29471,10001440,10025678,11740222 +660323,220104,4847,17191,41294,10001598,10873268,18055194 +690115,230035,4849,17208,46211,10001930,11499044,20391896 +721516,240502,4859,17311,46921,234168,11476074,33172419 +750221,250070,4853,17249,51776,10001372,13217416,72767725 +763516,254502,4849,17134,54541,10001713,14276985,77727289 +780848,260279,4838,17049,56939,1342139,15779993,82891474 +794485,264825,4831,16555,62800,10001656,18709464,152722219 +810351,270113,4830,15331,78071,10036134,22423718,743700488 +826266,275418,4838,13960,87937,10048667,23304035,532375453 +840324,280104,4850,6053,510749,10270172,25554600,644719458 +853820,284602,4828,6301,5122617,18335237,33313362,2150677872 +863575,287854,4844,5804,3362799,18108955,31477612,2426567593 +870503,290163,4814,6266,3849898,17726556,30213668,946468672 +877874,292620,4849,14537,6762198,17598377,28543562,1859169423 \ No newline at end of file diff --git a/paper_results/synthetic/99.5-4-0.5-10000/ghost-20us b/paper_results/synthetic/99.5-4-0.5-10000/ghost-20us new file mode 100644 index 0000000..b01e310 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000/ghost-20us @@ -0,0 +1,42 @@ +80588,10073,5235,12197,17405,22082,10008677,10021547 +159857,19982,5052,12317,18254,10007555,10009639,10030741 +239845,29980,4956,12378,19018,63498,10010133,10330635 +319503,39937,4944,12646,20010,10001336,10010884,10455216 +400947,50118,4912,12735,20602,10004754,10011000,10437175 +480752,60093,4852,13101,21662,10001728,10011625,10506461 +558810,69850,4876,13476,22200,10001181,10011880,10518232 +641252,80156,4869,14027,23128,10001395,10012322,10483129 +719697,89961,4861,14350,23623,10001015,10012684,10575569 +800798,100099,4860,14720,24455,10001236,10013116,10566181 +879553,109943,4883,14986,25118,46378,10013289,10737136 +960163,120019,4859,15341,25963,10001159,10014000,10613927 +1038162,129769,4877,15545,26562,10001056,10014627,10581036 +1121175,140146,4863,15793,27471,10001268,10016159,10993948 +1201134,150141,4865,16008,28203,181915,10017224,11251568 +1280298,160036,4841,16264,29258,10001379,10026247,11179154 +1359669,169957,4861,16441,29748,63835,10030543,11155133 
+1440191,180023,4864,16571,30858,10001334,10085883,12081538 +1519225,189902,4854,16711,31740,10001236,10099323,13596706 +1599605,199949,4852,16957,33915,10001054,10222805,15604470 +1680508,210062,4853,16986,35882,10001410,10376452,18779845 +1758898,219861,4851,17117,37972,219900,10532933,28159998 +1839575,229945,4829,17240,44611,2168282,11254028,65068228 +1919657,239956,4844,17174,46182,1225047,11428802,44039144 +2001019,250126,4850,17099,51609,1491838,13239875,59695390 +2018794,252348,4823,16950,55389,10001409,15107197,154360678 +2039285,254909,4844,16662,61310,10017181,18156691,227347807 +2059909,257487,4841,16715,58299,10001480,16516337,237745549 +2081289,260159,4821,16703,56801,2917286,15856004,120357457 +2098518,262313,4842,16316,67069,10014072,19232782,142080176 +2122112,265262,4835,15706,73789,10053609,20677379,327436013 +2139357,267418,4823,15685,76796,10035386,20930881,650730517 +2161332,270165,4828,15306,76920,10044031,21035072,543493945 +2179358,272418,4779,15370,76552,10035319,20714862,560401995 +2201121,275138,4815,14027,84614,10037480,22098903,583022998 +2218848,277354,4810,6157,116226,10186223,24204219,1859713115 +2242646,280329,4838,6321,589694,10188202,22785813,1377106812 +2261056,282630,4840,5906,1181673,12108418,27938049,3577564592 +2280367,285044,4838,5840,4527016,18093527,31865479,4110741976 +2301841,287728,4828,5833,2901948,17286314,32117877,2568507884 +2320564,290069,4836,6156,9198909,17782205,29952544,5506759659 +2338750,292342,4826,5998,5863861,16704743,29908794,1876379324 \ No newline at end of file diff --git a/paper_results/synthetic/99.5-4-0.5-10000/ghost-30us b/paper_results/synthetic/99.5-4-0.5-10000/ghost-30us new file mode 100644 index 0000000..fffdf59 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000/ghost-30us @@ -0,0 +1,9 @@ +239767,29970,4956,12392,19080,10007628,10010215,10348099 +559789,69973,4891,13482,22232,10006718,10012165,10575940 +880986,110122,4842,15061,25338,10000961,10013716,10674103 +1201731,150215,4871,16015,28165,136900,10017566,10686743 +1520031,190002,4837,16697,31503,10001252,10117138,13865673 +1838597,229823,4847,17106,40208,10001394,10947655,26846465 +2159929,269990,4812,15793,91704,10087965,19326587,669343213 +2243249,280404,4841,6090,246300,11943712,25837839,1496060150 +2321529,290189,4839,5965,4677117,16685867,31217803,4185115001 diff --git a/paper_results/synthetic/99.5-4-0.5-10000/shinjuku-10us b/paper_results/synthetic/99.5-4-0.5-10000/shinjuku-10us new file mode 100644 index 0000000..16ef381 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000/shinjuku-10us @@ -0,0 +1,36 @@ +10000,10008,4727,4798,4842,4894,5453,11793555,11809694,11814542 +20000,20049,4734,4811,4859,4915,7885,11797528,11815115,11830203 +30000,29999,4736,4812,4861,4923,6296,11794475,11809811,11813999 +40000,40105,4730,4806,4856,4925,6303,11794552,11810743,11828531 +50000,49997,4730,4808,4862,4940,7089,11790919,11809001,13460617 +60000,59848,4734,4813,4869,4949,6436,11790331,11809575,11818637 +70000,69962,4733,4812,4869,4954,6210,11788249,11806748,11819125 +80000,79982,4729,4812,4874,4968,7314,11789069,11806002,11817290 +90000,90094,4727,4811,4877,4975,6908,11787480,11807198,11821435 +100000,99783,4735,4822,4889,4988,7236,11789627,11808941,11827168 +110000,109989,4732,4822,4895,5001,7723,11787780,11810260,12132087 +120000,119714,4733,4824,4899,5006,7126,11784401,11801656,11816416 +130000,130143,4729,4821,4899,5009,6668,11783225,11801165,11814038 +140000,139841,4734,4835,4944,5179,7638,11955213,13255843,14779226 
+150000,150038,4735,4837,4925,5046,8276,11789299,11814716,11944614 +160000,160103,4734,4837,4928,5052,7943,11788466,11819104,12070237 +170000,170343,4727,4834,4933,5073,8659,11796787,12389107,12940303 +180000,179735,4732,4844,4946,5088,8760,11796919,12249967,13310923 +190000,190385,4736,4854,4963,5116,9367,11800882,12691386,14827587 +200000,199679,4741,4869,4995,5205,9657,11860541,13025138,14500924 +210000,209771,4745,4882,5027,5399,11879,11972474,15296045,19267842 +220000,220281,4744,4885,5040,5581,14352,12028813,18829648,21467337 +230000,230070,4747,4900,5069,5646,10637,12056626,14607636,15743104 +240000,239699,4751,4925,5174,6964,14678,12489707,18522318,21540148 +250000,249675,4759,4944,5260,7483,12237,12824507,16849442,18461004 +260000,259561,4773,5097,7201,10207,18547,14983535,20663131,25903029 +270000,270402,4792,5220,8200,12142,22522,16248588,25416260,29681248 +280000,279900,4836,8456,13395,21963,31888,22913395,34218647,38718327 +290000,290041,4855,8704,13905,18734,30265,22617788,32159553,37090629 +300000,300077,9390,51418,71534,86514,117258,85219514,122900061,124862025 +310000,310281,117521,220190,262549,274009,307213,281901824,298476679,300354945 +320000,320109,198487,364599,409711,445979,460959,431255032,473147189,474908175 +330000,329713,595926,839052,975645,1076452,933680,922902,927424,1040839719 +340000,340137,1221395,1553107,1798366,1959847,1537942,1485129,1500084,1774926340 +350000,349288,2728695,3094102,2674665,2475621,2392436,2348442,2344202,2829528748 +360000,360583,3510970,4045571,4386984,4567398,4678119,4696583,8590432,3116612115 diff --git a/paper_results/synthetic/99.5-4-0.5-10000/shinjuku-30us b/paper_results/synthetic/99.5-4-0.5-10000/shinjuku-30us new file mode 100644 index 0000000..b4d4e53 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000/shinjuku-30us @@ -0,0 +1,13 @@ +30000,29999,4713,4787,4834,4891,6225,10610735,10626892,10650103 +70000,69962,4715,4789,4840,4910,5675,10611776,10628972,10646236 +110000,109989,4717,4795,4854,4945,6959,10608365,10625821,10932359 +150000,150038,4721,4806,4875,4977,7673,10607314,10623252,10633899 +220000,220281,4732,4833,4932,5102,23006,10619007,12996712,15150510 +260000,259561,4740,4867,5068,10338,28861,11107033,13896786,18585970 +300000,300077,4779,5242,18684,28495,62904,15093479,25877097,29812138 +310000,310281,4795,9689,22223,31216,61107,16315424,23274043,28525285 +320000,320109,4804,12164,24606,45201,78337,17859524,30369548,32426632 +330000,329713,4949,35150,74658,110328,164398,37084518,59840553,60697124 +340000,340137,63468,126907,267910,433889,509132,116418830,172508779,173589330 +350000,349288,451411,653200,741644,790108,867630,262100710,286912079,289605853 +360000,360583,644822,1387142,1974583,2163931,2037185,2074412,713443270,716300623 \ No newline at end of file diff --git a/paper_results/synthetic/99.5-4-0.5-10000/shinjuku-5us b/paper_results/synthetic/99.5-4-0.5-10000/shinjuku-5us new file mode 100644 index 0000000..700419e --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000/shinjuku-5us @@ -0,0 +1,36 @@ +10000,10008,4760,4835,4882,4941,7317,13626985,13701399,13780268 +20000,20049,4750,4827,4878,4942,6910,13620448,13696588,13719910 +30000,29999,4751,4829,4883,4955,7128,13627595,13712266,21458740 +40000,40105,4746,4826,4881,4961,6480,13631944,13695433,13710532 +50000,49997,4744,4826,4887,4978,7241,13641892,13697887,13876528 +60000,59848,4747,4832,4896,4988,6505,13627521,13689643,13706315 +70000,69962,4747,4833,4899,4996,6581,13625256,13686167,13705379 
+80000,79982,4749,4842,4916,5023,6853,13619288,13681164,13712052 +90000,90094,4751,4846,4926,5041,7058,13621912,13684871,13717103 +100000,99783,4749,4847,4930,5048,6832,13617189,13676653,13708220 +110000,109989,4750,4855,4947,5076,8402,13621990,13683816,13977071 +120000,119714,4748,4855,4953,5088,7344,13621338,13705643,13800376 +130000,130143,4747,4858,4961,5105,7321,13621219,13715262,13804134 +140000,139841,4753,4875,4991,5154,7944,13639228,13862284,14331474 +150000,150038,4746,4880,5015,5208,8714,13665125,14409095,14876926 +160000,160103,4750,4889,5029,5237,8556,13693692,14265514,16095177 +170000,170343,4756,4912,5086,5423,9469,13880207,16740345,18355109 +180000,179735,4762,4930,5122,5509,9921,13859940,18335141,20799700 +190000,190385,4770,4966,5197,5731,10403,14131931,18712176,20949941 +200000,199679,4785,5051,5611,7976,13333,16095909,25801166,28685951 +210000,209771,4802,5139,6209,8689,14543,18075370,28031551,29938072 +220000,220281,4806,5183,6376,9493,19150,17817239,38078590,40998608 +230000,230070,4844,5520,7468,10252,21176,19694433,41961494,44556045 +240000,239699,4933,7635,11265,15843,23082,29040974,45054325,46706465 +250000,249675,5004,10264,15784,21344,32019,37923331,59257472,62133255 +260000,259561,108138,173717,197496,217142,205550,222974,426610617,429448553 +270000,270402,235899,329705,430780,412573,853284,417114,882938203,886497734 +280000,279900,560313,716223,813878,872862,713699,705125,1388449,1644242796 +290000,290041,1096202,1378471,1165058,2148991,1111739,1128825,2209365,2588368814 +300000,300077,1599910,1861642,2005499,2074870,3218082,4174011,5486505,2920933058 +310000,310281,2489659,2696046,2854194,2941088,5050432,5949276,5061964229,5092470035 +320000,320109,3527763,3864065,4010708,4114338,3740262,3768464,7485195,6869717429 +330000,329713,4653574,5087046,5276773,5404078,9581883,10864693,15612620,6869717429 +340000,340137,6027692,6374174,6632296,6842248,6688469,6662096,13271594,10752384080 +350000,349288,8082052,8457885,7993276,7794589,7646149,7673934,15284431,13031579011 +360000,360583,9212979,9672673,9946033,10212596,18703156,20540851,29834232,15152983675 diff --git a/paper_results/synthetic/99.5-4-0.5-10000/skyloft-10us b/paper_results/synthetic/99.5-4-0.5-10000/skyloft-10us new file mode 100644 index 0000000..e3c9e8b --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000/skyloft-10us @@ -0,0 +1,10 @@ +37050.760,36941.333,4659,5972,7803,11240625,11646001,12216457 +74101.519,73855.667,4587,5140,7054,10126,11148604,12080225 +111152.279,110892.000,4578,5160,7319,9738,11452389,12505789 +148203.038,148383.333,4591,5271,8599,11039142,11906643,12911869 +185253.798,185569.333,4581,5347,11684,11090319,12200483,13234176 +222304.557,222423.667,4607,5539,16047,20779,12512190,19434344 +259355.317,259087.333,4562,6442,23132,31034,13779988,26900056 +296406.076,295957.667,4603,11600,40333,47630,27512121,40290604 +326046.684,325394.000,342694,625665,825309,834364,634344273,710447762 +333456.836,332988.000,449770,961019,1167331,1181305,925543560,1008014063 diff --git a/paper_results/synthetic/99.5-4-0.5-10000/skyloft-20us b/paper_results/synthetic/99.5-4-0.5-10000/skyloft-20us new file mode 100644 index 0000000..1bb3a94 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000/skyloft-20us @@ -0,0 +1,17 @@ +37050.760,37114.667,4626,5512,7486,9383,10710040,10815968 +74101.519,73998.667,4602,5203,6807,10514835,10591210,10871995 +111152.279,111134.667,4590,5189,6966,8764,10667224,11193089 +148203.038,147928.000,4583,5234,7794,10519240,10940703,11638300 
+185253.798,185110.000,4593,5288,10604,22802,11083432,11695768 +222304.557,221705.667,4616,5423,21163,10645379,11320208,18449807 +259355.317,259101.000,4611,5655,23521,10693491,11848486,19651759 +296406.076,296560.000,4619,7966,30497,10882422,13812074,22680515 +303816.228,303768.667,4626,9205,44510,10892993,15565380,28474905 +311226.380,310754.333,4627,8310,35450,45934,14274034,24488811 +318636.532,318384.000,4630,13175,60324,73343,22322604,35348175 +326046.684,325879.333,4629,16220,66594,74659,23829858,36513091 +337161.912,336807.333,15729,115762,176170,193374,78930753,87517943 +340866.988,340849.000,110313,331793,435906,444759,190124573,210744480 +344572.064,344108.667,268983,600445,885705,895693,346962935,420829451 +348277.140,347652.667,433306,900314,1180649,1189332,512645152,559734179 +359392.368,359091.333,807805,1348695,1997291,2007328,711466388,880465339 diff --git a/paper_results/synthetic/99.5-4-0.5-10000/skyloft-30us b/paper_results/synthetic/99.5-4-0.5-10000/skyloft-30us new file mode 100644 index 0000000..bdfb358 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000/skyloft-30us @@ -0,0 +1,17 @@ +37050.760,37186.000,4615,5180,7067,10326776,10388053,11019894 +74101.519,73981.333,4594,5147,6642,10315731,10364170,10977050 +111152.279,111183.000,4610,5254,6977,8487,10440338,11720233 +148203.038,148622.333,4577,5200,7366,10333518,10583601,11001523 +185253.798,185360.000,4605,5244,9003,31490,10709250,11133464 +222304.557,222087.000,4594,5307,16695,31767,10802243,12007035 +259355.317,259379.667,4609,5517,28115,10358931,11116799,14234356 +296406.076,295872.000,4616,7519,55044,10604147,14217625,25022766 +303816.228,304001.667,4622,8912,85963,10412326,16366174,35114109 +311226.380,311325.667,4597,15074,97305,10697586,18848429,38223252 +318636.532,318739.333,4566,13283,108240,137291,16724241,41425659 +326046.684,325771.000,4608,22199,134346,147812,33671320,48603188 +337161.912,337077.000,4583,38385,250440,274371,40954340,86636079 +348277.140,348315.333,48915,197109,421695,433543,121076269,140494830 +351982.216,352130.333,321351,459150,745374,755264,230517765,240361527 +355687.292,355399.333,575925,942482,1176670,1189157,368244471,378564012 +363097.443,361894.000,859826,1540919,1792030,1814418,518947190,560236969 diff --git a/paper_results/synthetic/99.5-4-0.5-10000/skyloft-inf b/paper_results/synthetic/99.5-4-0.5-10000/skyloft-inf new file mode 100644 index 0000000..d0283f2 --- /dev/null +++ b/paper_results/synthetic/99.5-4-0.5-10000/skyloft-inf @@ -0,0 +1,13 @@ +55299.641,55504.667,4736,6225,6916,7417,10849783,11047820 +110599.282,110562.667,4661,4845,6605,7609,10856570,12054390 +165898.923,165997.333,4656,4827,6621,366596,10859157,12416815 +171428.887,171935.333,4660,4838,6756,1897800,10834216,12507625 +176958.852,176975.000,4658,5122,20493,3266478,10854025,14005487 +182488.816,182695.000,4661,4840,8525,10054543,10879732,12598890 +188018.780,187665.333,4666,4842,14892,2236190,10928982,12703200 +193548.744,193076.000,4656,4882,1398684,10076427,10960034,15258292 +199078.708,198689.333,4659,5133,828131,2473799,10887220,13419415 +204608.672,204865.333,4668,4962,589342,10050551,10874301,12874191 +210138.636,210136.667,4667,4968,1189225,2621649,10999127,13451755 +215668.600,215687.667,4669,5059,820038,2999852,10982661,13963554 +221198.564,221162.000,4664,5142,1983184,10051137,10921680,14558258 \ No newline at end of file diff --git a/params.h.in b/params.h.in new file mode 100644 index 0000000..fbb3f0b --- /dev/null +++ b/params.h.in @@ -0,0 +1,66 @@ +#pragma once + +/* + * CPU 
mapping (2 sockets, 48 cores, 96 threads) + * (Hyper-threading is not considered yet) + * + * CPU CORE NODE + * 3 3 0 + * 2 2 0 + * 26 26 1 + * 27 27 1 + */ + +#define MAX_NUMA 2 +#define MAX_CPUS 96 +#define CPU_FREQ_MHZ 2000 + +// 0-23 -> 48-71 +// 24-47 -> 72-95 +#define SIBLING_CPU_MAP \ + "48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71," \ + "72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95," \ + "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23," \ + "24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47" + +#define USED_CPUS 24 +#define IO_CPU 23 +#define USED_HW_CPU_LIST "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23" + +#define MAX_TASKS (1024 * 64) +#define MAX_APPS 2 +#define MAX_TASKS_PER_APP (MAX_TASKS / MAX_APPS) +#define MAX_TIMERS 4096 + +#define SOFTIRQ_MAX_BUDGET 16 +#define RUNTIME_RQ_SIZE 32 +#define RUNTIME_STACK_SIZE (16 * 1024) +#define RUNTIME_LARGE_STACK_SIZE (256 * 1024) + +#define POLICY_TASK_DATA_SIZE (2 * 64) +#define POLICY_NAME_SIZE 32 +#if defined(SKYLOFT_SCHED_CFS) || defined(SKYLOFT_SCHED_FIFO) || defined(SKYLOFT_SCHED_FIFO2) +#define SCHED_PERCPU 1 +#endif + +#define TIMER_HZ 20000 +#define PREEMPT_QUAN 5 + +/* + * I/O params + */ +#define IO_MAX_PROC 1024 +#define IO_NUM_MBUFS (8192 * 16) +#define IO_NUM_COMPLETIONS 32767 +#define IO_OVERFLOW_BATCH_DRAIN 64 +#define IO_TX_BURST_SIZE 64 +#define IO_CMD_BURST_SIZE 64 +#define IO_RX_BURST_SIZE 64 +#define IO_CONTROL_BURST_SIZE 4 +#define IO_PKTQ_SIZE 4096 +#define IO_CMDQ_SIZE 4096 + +#define IO_ADDR "10.3.3.3" +#define IO_MAC "90:E2:BA:8C:66:88" +#define IO_GATEWAY "10.3.3.1" +#define IO_NETMASK "255.255.255.0" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..b998a06 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +absl-py diff --git a/scripts/.gitignore b/scripts/.gitignore new file mode 100644 index 0000000..f08278d --- /dev/null +++ b/scripts/.gitignore @@ -0,0 +1 @@ +*.pdf \ No newline at end of file diff --git a/scripts/bench/schbench.sh b/scripts/bench/schbench.sh new file mode 100755 index 0000000..5b1552b --- /dev/null +++ b/scripts/bench/schbench.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +tag=$1 +dir=results_24_$tag + +bin=$(dirname "$0")/../../$base/build/bin/schbench +cmd="$bin -n10 -F128 -m1 -r10 -i5" + +mkdir -p $dir +echo "cores,wake99,rps50,lat99" > $dir/all.csv + +# cores="4 8 16 24 32 40 48" +# cores="64 72 80 96 112 128 144 160 176 192 208 224 240 256" +cores=$(seq 8 8 96) + +for i in $cores; do + echo "Running with $i cores" + output="$dir/$i.txt" + sudo rm -rf /dev/shm/skyloft_* /mnt/huge/skyloft_* + sudo ipcrm -a > /dev/null 2>&1 + sleep 5 + timeout 20 $cmd -t$i 2>&1 | tee $output + + wake=$(cat $output | grep -a "* 99.0th" | tail -n2 | head -n1 | awk '{print $3}') + lat=$(cat $output | grep -a "* 99.0th" | tail -n1 | awk '{print $3}') + rps=$(cat $output | grep -a "* 50.0th" | tail -n1 | awk '{print $3}') + echo "$i,$wake,$rps,$lat" + echo "$i,$wake,$rps,$lat" >> $dir/all.csv +done diff --git a/scripts/build.sh b/scripts/build.sh new file mode 100755 index 0000000..c4e75db --- /dev/null +++ b/scripts/build.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +script_dir=$(dirname $(readlink -f $0)) + +app_name=$1 +sched_policy=$2 +preempt_quantum=$3 + +if [[ $app_name =~ synthetic-(.*) ]]; then + params_file=$script_dir/params/shinjuku.params + build_cmd="make build SCHED=${BASH_REMATCH[1]} UINTR=1 DPDK=0 STAT=0" +elif [[ $app_name =~ schbench-([a-zA-Z0-9]+).* ]]; then + 
params_file=$script_dir/params/$app_name.params + build_cmd="make schbench SCHED=${BASH_REMATCH[1]} UINTR=1 DPDK=0 STAT=0" +elif [ $app_name == "memcached" ]; then + params_file=$script_dir/params/$app_name.params + build_cmd="make memcached SCHED=fifo UINTR=0 DPDK=1 STAT=0" +elif [[ $app_name =~ rocksdb-server-([a-zA-Z0-9]+) ]]; then + params_file=$script_dir/params/$app_name.params + build_cmd="make rocksdb SCHED=fifo UINTR=1 DPDK=1 STAT=0" +elif [ $app_name == "rocksdb-server" ]; then + params_file=$script_dir/params/$app_name.params + build_cmd="make rocksdb SCHED=fifo UINTR=0 DPDK=1 STAT=0" +elif [ $app_name == "bench_thread" ]; then + params_file=$script_dir/params/thread.params + build_cmd="make build SCHED=rr UINTR=0 DPDK=0 STAT=0" +fi + +# Generate parameters + +output_params=$script_dir/../params.h.in +default_params=$script_dir/params/default.params +if [[ ! -f $params_file ]]; then + echo "Params file $params_file does not exist." + exit 1 +fi +cat $default_params > $output_params +while IFS= read -r line; do + if [[ $line =~ ^#define[[:space:]]+([a-zA-Z_][a-zA-Z_0-9]*) ]]; then + macro_name=${BASH_REMATCH[1]} + echo "#undef $macro_name" >> $output_params + echo "$line" >> $output_params + fi +done < $params_file + +# Build the application + +eval $build_cmd diff --git a/scripts/cmake/rocksdb.mk b/scripts/cmake/rocksdb.mk new file mode 100644 index 0000000..d89c78d --- /dev/null +++ b/scripts/cmake/rocksdb.mk @@ -0,0 +1,32 @@ +include(FetchContent) + +# Fetch source code of rocksdb from github +FetchContent_Declare( + rocksdb + URL https://github.com/facebook/rocksdb/archive/refs/tags/v6.15.5.tar.gz +) +FetchContent_GetProperties(rocksdb) + +if(NOT rocksdb_POPULATED) + FetchContent_Populate(rocksdb) + + # Build rocksdb libraries automatically + # add_subdirectory(${rocksdb_SOURCE_DIR} ${rocksdb_BINARY_DIR}) +endif() + +# Build rocksdb static library with custom command +add_custom_command( + OUTPUT ${rocksdb_SOURCE_DIR}/librocksdb.a + COMMAND ROCKSDB_DISABLE_ZSTD=1 ROCKSDB_DISABLE_BZIP=1 ROCKSDB_DISABLE_LZ4=1 make static_lib -j${ROCKSDB_JOBS} + WORKING_DIRECTORY ${rocksdb_SOURCE_DIR} + COMMENT "Build RocksDB static library" + VERBATIM +) +add_custom_target(rocksdb DEPENDS ${rocksdb_SOURCE_DIR}/librocksdb.a) +add_library(librocksdb STATIC IMPORTED) +set_property(TARGET librocksdb PROPERTY IMPORTED_LOCATION ${rocksdb_SOURCE_DIR}/librocksdb.a) +set_target_properties( + librocksdb PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES ${rocksdb_SOURCE_DIR}/include +) +add_dependencies(librocksdb rocksdb) diff --git a/scripts/disable_cpufreq_scaling.sh b/scripts/disable_cpufreq_scaling.sh new file mode 100755 index 0000000..d04f2fc --- /dev/null +++ b/scripts/disable_cpufreq_scaling.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +cpu_list="2,3,26,27" + +function usage() { + echo "Usage: $0 [OPTIONS] " + echo "" + echo "Options:" + echo " -h Display this message" + echo " -c List of CPUs" + echo " -a Select all CPUs" + echo "" +} + +while getopts "c:a" opt +do + case "$opt" in + c) cpu_list="$OPTARG";; + a) cpu_list=$(cat /sys/devices/system/cpu/online);; + h | ?) 
usage; exit 0 ;; + esac +done + +cpu_list=$(echo $cpu_list | perl -pe 's/(\d+)-(\d+)/join(",", $1..$2)/eg') + +echo $cpu_list + +for cpu in ${cpu_list//,/ } +do + sudo cpufreq-set -c ${cpu} -g performance + freq=$(cat /sys/devices/system/cpu/cpu${cpu}/cpufreq/scaling_cur_freq) + echo "Set CPU ${cpu} frequency to ${freq} kHz" +done diff --git a/scripts/install_deps.sh b/scripts/install_deps.sh new file mode 100755 index 0000000..05669ab --- /dev/null +++ b/scripts/install_deps.sh @@ -0,0 +1,8 @@ +sudo apt-get install -y \ + cmake \ + cpufrequtils \ + libnuma-dev \ + libgflags-dev \ + zlib1g-dev \ + libzstd-dev \ + libsnappy-dev \ No newline at end of file diff --git a/scripts/make_rootfs.sh b/scripts/make_rootfs.sh new file mode 100755 index 0000000..92bf88f --- /dev/null +++ b/scripts/make_rootfs.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +sudo mkdir mnt +sudo dd if=/dev/zero of=./rootfs.ext4 bs=1M count=32 +sudo mkfs.ext4 rootfs.ext4 +sudo mount rootfs.ext4 mnt + +sudo cp -r $1/_install/* mnt + +cd mnt +sudo mkdir -p sys mnt sys etc/init.d dev tmp proc + +cd dev +sudo mknod console c 5 1 +sudo mknod null c 1 3 +sudo mknod tty1 c 4 1 +sudo mknod ttyS0 c 4 64 + +cd ../ +sudo echo "proc /proc proc defaults 0 0" > etc/fstab +sudo echo "tmpfs /tmp tmpfs defaults 0 0" >> etc/fstab +sudo echo "sysfs /sys sysfs defaults 0 0" >> etc/fstab +sudo chmod 755 etc/fstab + +sudo echo "/bin/mount -a" > etc/init.d/rcS +sudo echo "mount -o remount, rw /" >> etc/init.d/rcS +sudo echo "mkdir -p /dev/pts" >> etc/init.d/rcS +sudo echo "mount -t devpts devpts /dev/pts" >> etc/init.d/rcS +sudo echo "mdev -s" >> etc/init.d/rcS +sudo chmod 755 etc/init.d/rcS + +sudo echo "::sysinit:/etc/init.d/rcS" > etc/inittab +sudo echo "::respawn:-/bin/sh" >> etc/inittab +sudo echo "::askfirst:-/bin/sh" >> etc/inittab +sudo echo "::ctrlaltdel:/bin/umount -a -r" >> etc/inittab +sudo chmod 755 etc/inittab + +cd .. 
+sudo umount mnt +sudo rm -rf mnt + +gzip --best -c rootfs.ext4 > rootfs.img.gz +rm -rf rootfs.ext4 diff --git a/scripts/params/default.params b/scripts/params/default.params new file mode 100644 index 0000000..fbb3f0b --- /dev/null +++ b/scripts/params/default.params @@ -0,0 +1,66 @@ +#pragma once + +/* + * CPU mapping (2 sockets, 48 cores, 96 threads) + * (Hyper-threading is not considered yet) + * + * CPU CORE NODE + * 3 3 0 + * 2 2 0 + * 26 26 1 + * 27 27 1 + */ + +#define MAX_NUMA 2 +#define MAX_CPUS 96 +#define CPU_FREQ_MHZ 2000 + +// 0-23 -> 48-71 +// 24-47 -> 72-95 +#define SIBLING_CPU_MAP \ + "48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71," \ + "72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95," \ + "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23," \ + "24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47" + +#define USED_CPUS 24 +#define IO_CPU 23 +#define USED_HW_CPU_LIST "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23" + +#define MAX_TASKS (1024 * 64) +#define MAX_APPS 2 +#define MAX_TASKS_PER_APP (MAX_TASKS / MAX_APPS) +#define MAX_TIMERS 4096 + +#define SOFTIRQ_MAX_BUDGET 16 +#define RUNTIME_RQ_SIZE 32 +#define RUNTIME_STACK_SIZE (16 * 1024) +#define RUNTIME_LARGE_STACK_SIZE (256 * 1024) + +#define POLICY_TASK_DATA_SIZE (2 * 64) +#define POLICY_NAME_SIZE 32 +#if defined(SKYLOFT_SCHED_CFS) || defined(SKYLOFT_SCHED_FIFO) || defined(SKYLOFT_SCHED_FIFO2) +#define SCHED_PERCPU 1 +#endif + +#define TIMER_HZ 20000 +#define PREEMPT_QUAN 5 + +/* + * I/O params + */ +#define IO_MAX_PROC 1024 +#define IO_NUM_MBUFS (8192 * 16) +#define IO_NUM_COMPLETIONS 32767 +#define IO_OVERFLOW_BATCH_DRAIN 64 +#define IO_TX_BURST_SIZE 64 +#define IO_CMD_BURST_SIZE 64 +#define IO_RX_BURST_SIZE 64 +#define IO_CONTROL_BURST_SIZE 4 +#define IO_PKTQ_SIZE 4096 +#define IO_CMDQ_SIZE 4096 + +#define IO_ADDR "10.3.3.3" +#define IO_MAC "90:E2:BA:8C:66:88" +#define IO_GATEWAY "10.3.3.1" +#define IO_NETMASK "255.255.255.0" diff --git a/scripts/params/memcached.params b/scripts/params/memcached.params new file mode 100644 index 0000000..826aafa --- /dev/null +++ b/scripts/params/memcached.params @@ -0,0 +1,4 @@ +#define USED_CPUS 5 +#define IO_CPU 4 +#define USED_HW_CPU_LIST "1,2,3,4,22" +#define MAX_APPS 1 \ No newline at end of file diff --git a/scripts/params/rocksdb-server-10us.params b/scripts/params/rocksdb-server-10us.params new file mode 100644 index 0000000..cfb6dbb --- /dev/null +++ b/scripts/params/rocksdb-server-10us.params @@ -0,0 +1,4 @@ +#define USED_CPUS 15 +#define IO_CPU 14 +#define USED_HW_CPU_LIST "1,2,3,4,5,6,7,8,9,10,11,12,13,14,22" +#define TIMER_HZ 100000 diff --git a/scripts/params/rocksdb-server-20us.params b/scripts/params/rocksdb-server-20us.params new file mode 100644 index 0000000..22d2beb --- /dev/null +++ b/scripts/params/rocksdb-server-20us.params @@ -0,0 +1,4 @@ +#define USED_CPUS 15 +#define IO_CPU 14 +#define USED_HW_CPU_LIST "1,2,3,4,5,6,7,8,9,10,11,12,13,14,22" +#define TIMER_HZ 50000 diff --git a/scripts/params/rocksdb-server-5us.params b/scripts/params/rocksdb-server-5us.params new file mode 100644 index 0000000..d673fb7 --- /dev/null +++ b/scripts/params/rocksdb-server-5us.params @@ -0,0 +1,4 @@ +#define USED_CPUS 15 +#define IO_CPU 14 +#define USED_HW_CPU_LIST "1,2,3,4,5,6,7,8,9,10,11,12,13,14,22" +#define TIMER_HZ 200000 diff --git a/scripts/params/rocksdb-server.params b/scripts/params/rocksdb-server.params new file mode 100644 index 0000000..bbd7ba4 --- /dev/null +++ 
b/scripts/params/rocksdb-server.params @@ -0,0 +1,3 @@ +#define USED_CPUS 15 +#define IO_CPU 14 +#define USED_HW_CPU_LIST "1,2,3,4,5,6,7,8,9,10,11,12,13,14,22" diff --git a/scripts/params/schbench-cfs-50us.params b/scripts/params/schbench-cfs-50us.params new file mode 100644 index 0000000..326e4bd --- /dev/null +++ b/scripts/params/schbench-cfs-50us.params @@ -0,0 +1,3 @@ +#define USED_CPUS 22 +#define USED_HW_CPU_LIST "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21" +#define TIMER_HZ 100000 diff --git a/scripts/params/schbench-rr-1ms.params b/scripts/params/schbench-rr-1ms.params new file mode 100644 index 0000000..a6c4c8e --- /dev/null +++ b/scripts/params/schbench-rr-1ms.params @@ -0,0 +1,4 @@ +#define USED_CPUS 24 +#define USED_HW_CPU_LIST "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23" +#define TIMER_HZ 5000 +#define PREEMPT_QUAN 5 diff --git a/scripts/params/schbench-rr-200us.params b/scripts/params/schbench-rr-200us.params new file mode 100644 index 0000000..2e8847c --- /dev/null +++ b/scripts/params/schbench-rr-200us.params @@ -0,0 +1,4 @@ +#define USED_CPUS 24 +#define USED_HW_CPU_LIST "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23" +#define TIMER_HZ 25000 +#define PREEMPT_QUAN 5 diff --git a/scripts/params/schbench-rr-50us.params b/scripts/params/schbench-rr-50us.params new file mode 100644 index 0000000..231e709 --- /dev/null +++ b/scripts/params/schbench-rr-50us.params @@ -0,0 +1,4 @@ +#define USED_CPUS 22 +#define USED_HW_CPU_LIST "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21" +#define TIMER_HZ 100000 +#define PREEMPT_QUAN 5 diff --git a/scripts/params/schbench-rr.params b/scripts/params/schbench-rr.params new file mode 100644 index 0000000..7bff9fe --- /dev/null +++ b/scripts/params/schbench-rr.params @@ -0,0 +1,2 @@ +#define USED_CPUS 24 +#define USED_HW_CPU_LIST "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23" diff --git a/scripts/params/shinjuku.params b/scripts/params/shinjuku.params new file mode 100644 index 0000000..482a54a --- /dev/null +++ b/scripts/params/shinjuku.params @@ -0,0 +1,2 @@ +#define USED_CPUS 21 +#define USED_HW_CPU_LIST "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20" diff --git a/scripts/params/thread.params b/scripts/params/thread.params new file mode 100644 index 0000000..6208403 --- /dev/null +++ b/scripts/params/thread.params @@ -0,0 +1,2 @@ +#define USED_CPUS 1 +#define USED_HW_CPU_LIST "1" diff --git a/scripts/plots/common.py b/scripts/plots/common.py new file mode 100644 index 0000000..5e734d4 --- /dev/null +++ b/scripts/plots/common.py @@ -0,0 +1,11 @@ +import os +import matplotlib.pyplot as plt + +plt.rcParams['axes.labelsize'] = 11 + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +RES_DIR = os.path.join(BASE_DIR, "../../paper_results") + +COLORS = plt.get_cmap("tab10") +COLORS2 = plt.get_cmap("tab20") +COLORS3 = plt.get_cmap("tab20c") diff --git a/scripts/plots/plot_memcached.py b/scripts/plots/plot_memcached.py new file mode 100644 index 0000000..e76b405 --- /dev/null +++ b/scripts/plots/plot_memcached.py @@ -0,0 +1,115 @@ +import os +import argparse +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.ticker import (LogLocator, MultipleLocator, AutoMinorLocator) + +from common import * + + +def load_result(name): + f = open(name, "r") + results = [] + for i, line in enumerate(f.readlines()): + # if i % 2 == 0: + # continue + if line.strip() == "": + break + results.append(line.split(", ")) + + results = np.array(results) + throughputs = 
results[:, 2] + latency = results[:, -3] + return throughputs, latency + + +def plot(data=None, output=None): + bar_colors = [ + COLORS(0), + COLORS(2), + COLORS(4), + COLORS(1), + COLORS(3), + ] + markers = [ + ",:", + ".-", + "+-", + "x-", + "o-", + ] + labels = [ + "Shenango", + "Skyloft", + "Skyloft (20$\mu$s)", + "Skyloft (10$\mu$s)", + "Skyloft (5$\mu$s)", + "Skyloft (5$\mu$s)", + ] + fnames = [ + "shenango", + "skyloft", + # "skyloft_20us", + # "skyloft_10us", + ] + zorder = [1, 4, 3, 2] + + results = {} + res_dir = os.path.join(RES_DIR, data) + for f in os.listdir(res_dir): + if f not in fnames: + continue + thourghputs, latencies = load_result(os.path.join(res_dir, f)) + results[f] = {} + print(f, thourghputs, latencies) + for throughput, latency in zip(thourghputs, latencies): + results[f][float(throughput) / 1000000] = float(latency) + + fig = plt.figure(figsize=(6, 2.5)) + + ax1 = fig.add_subplot(1, 1, 1) + ax1.grid(which="major", axis="y", linestyle=":", alpha=0.5, zorder=0) + + ax1.set_xlim(0, 2.6) + ax1.set_xticks([0, 0.5, 1, 1.5, 2, 2.5]) + ax1.set_xticklabels(["0", "0.5", "1", "1.5", "2", "2.5"]) + ax1.xaxis.set_minor_locator(MultipleLocator(0.1)) + + ax1.set_ylim(0, 500) + ax1.set_yticks([0, 100, 200, 300, 400, 500]) + ax1.yaxis.set_minor_locator(MultipleLocator(50)) + + for i, e in enumerate(fnames): + # if e == "skyloft_nopre": + # continue + ax1.plot( + list(results[e].keys()), + list(results[e].values()), + markers[i], + label=labels[i], + zorder=zorder[i], + linewidth=1, + markersize=4, + markeredgewidth=1, + color=bar_colors[i], + ) + + # Create a unique legend + handles, labels = plt.gca().get_legend_handles_labels() + by_label = dict(zip(labels, handles)) + leg = plt.legend( + by_label.values(), by_label.keys(), loc="best", ncol=1, frameon=False, + ) + leg.get_frame().set_linewidth(0.0) + + fig.supylabel("99.9% Latency ($\mu$s)", x=0.01) + fig.supxlabel("Throughput (MRPS)", y=0.005) + + plt.tight_layout() + plt.subplots_adjust(top=0.97, bottom=0.18, left=0.11, right=0.97) + plt.savefig(output) + plt.show() + + +if __name__ == "__main__": + plot("memcached/USR", "memcached.pdf") diff --git a/scripts/plots/plot_schbench.py b/scripts/plots/plot_schbench.py new file mode 100644 index 0000000..5720821 --- /dev/null +++ b/scripts/plots/plot_schbench.py @@ -0,0 +1,107 @@ +import os +import argparse +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.ticker import (MultipleLocator, AutoMinorLocator) + +from common import * + + +parser = argparse.ArgumentParser() +parser.add_argument("--output", type=str, help="output file", default="schbench.pdf") +args = parser.parse_args() + +FNAMES = [ + "linux_rr", + "linux_cfs", + "skyloft_rr50us", + "skyloft_cfs50us", +] +LABELS = [ + "Linux-RR", + "Linux-CFS", + "Skyloft-RR", + "Skyloft-CFS", +] +MARKERS = [ + ".:", + "+--", + "x-", + "o-", +] +COLORS = [ + COLORS(0), + COLORS(4), + COLORS(1), + COLORS(3), +] + +def load_result(name): + f = open(name, "r") + results = [list(map(int, line.strip().split(","))) for line in f.readlines()[1:]] + results = list(filter(lambda x: x[0] >=8 and x[0] <= 96, results)) + results = np.array(results) + + cores = results[:, 0] + lat = results[:, 1] + lat2 = results[:, 3] + rps = results[:, 2] / 1000 + return cores, lat, lat2, rps + +def plot_one(ax, idx, x, y): + ax.plot( + x, + y, + MARKERS[idx], + label=LABELS[idx], + zorder=3, + linewidth=1, + markersize=4, + markeredgewidth=1, + color=COLORS[idx], + ) + +def plot(data=None, output=None): + res_dir = os.path.join(RES_DIR, 
data) + fig, ax = plt.subplots(figsize=(6, 2.5)) + axs = [ax] + + axs[0].set_yscale('log', base=10) + axs[0].set_ylim(1, 20000) + axs[0].set_yticks([1, 10, 100, 1000, 10000]) + axs[0].set_yticklabels(["1$\mu$s", "10$\mu$s", "100$\mu$s", "1ms", "10ms"]) + + for idx, ax in enumerate(axs): + ax.grid(which="major", axis="y", linestyle="dashed", alpha=0.5, zorder=0) + + ax.set_xlim(0, 100) + xticks = [16 * i for i in range(0, 7)] + ax.set_xticks(xticks) + ax.set_xticklabels(xticks) + + ax.xaxis.set_minor_locator(MultipleLocator(8)) + + for i, fname in enumerate(FNAMES): + results = load_result(f'{res_dir}/{fname}/all.csv') + print(fname, results) + plot_one(ax, i, results[0], results[idx + 1]) + + handles, labels = ax.get_legend_handles_labels() + plt.legend(handles, labels, loc='lower right', + bbox_to_anchor=(1, -0.03), + ncol=1, + frameon=False, + handlelength=2.5, + ) + + fig.supylabel("99% wakeup latency", x=0.01) + fig.supxlabel("Number of worker threads", y=0.005) + + plt.tight_layout() + plt.subplots_adjust(top=0.97, bottom=0.18, left=0.13, right=0.97) + plt.savefig(output) + plt.show() + + +if __name__ == "__main__": + plot("schbench", args.output) diff --git a/scripts/plots/plot_schbench2.py b/scripts/plots/plot_schbench2.py new file mode 100644 index 0000000..e1e7dc5 --- /dev/null +++ b/scripts/plots/plot_schbench2.py @@ -0,0 +1,120 @@ +import os +import argparse +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.ticker import (MultipleLocator, AutoMinorLocator) + +from common import * + + +parser = argparse.ArgumentParser() +parser.add_argument("--output", type=str, help="output file", default="schbench2.pdf") +args = parser.parse_args() + +FNAMES = [ + "skyloft_fifo", + # "skyloft_rr100ms", + # "skyloft_rr10ms", + "skyloft_rr1ms", + "skyloft_rr200us", + "skyloft_rr50us", +] +LABELS = [ + "Skyloft-FIFO", + # "Skyloft RR (100ms)", + # "Skyloft RR (10ms)", + "Skyloft-RR (1ms)", + "Skyloft-RR (200$\mu$s)", + "Skyloft-RR (50$\mu$s)", +] +# "50us": ".-.", +# "30us": "*--", +# "20us": "p-", +# "15us": ".-", +# "10us": "x-", +# "5us": ".:", +# "inf": ",:", +MARKERS = [ + ".:", + "+--", + "*-", + "x-", + # ".:", + # "2--", +] +COLORS = [ + COLORS(0), + COLORS(4), + COLORS(2), + COLORS(1), +] + +def load_result(name): + f = open(name, "r") + results = [list(map(int, line.strip().split(","))) for line in f.readlines()[1:]] + results = list(filter(lambda x: x[0] >=8 and x[0] <= 96, results)) + results = np.array(results) + + cores = results[:, 0] + lat = results[:, 1] + lat2 = results[:, 3] + rps = results[:, 2] / 1000 + return cores, lat, lat2, rps + +def plot_one(ax, idx, x, y): + ax.plot( + x, + y, + MARKERS[idx], + label=LABELS[idx], + zorder=3, + linewidth=1, + markersize=4, + # markeredgewidth=2, + color=COLORS[idx], + ) + +def plot(data=None, output=None): + res_dir = os.path.join(RES_DIR, data) + fig, ax = plt.subplots(figsize=(6, 2.5)) + axs = [ax] + + axs[0].set_yscale('log', base=10) + axs[0].set_ylim(1, 10000) + axs[0].set_yticks([1, 10, 100, 1000, 10000]) + axs[0].set_yticklabels(["1$\mu$s", "10$\mu$s", "100$\mu$s", "1ms", "10ms"]) + + for idx, ax in enumerate(axs): + ax.grid(which="major", axis="y", linestyle="dashed", alpha=0.5, zorder=0) + + ax.set_xlim(0, 100) + xticks = [16 * i for i in range(0, 7)] + ax.set_xticks(xticks) + ax.set_xticklabels(xticks) + + ax.xaxis.set_minor_locator(MultipleLocator(8)) + + for i, fname in enumerate(FNAMES): + results = load_result(f'{res_dir}/{fname}/all.csv') + print(fname, results) + plot_one(ax, i, results[0], results[idx 
+ 1]) + + handles, labels = ax.get_legend_handles_labels() + plt.legend(handles, labels, loc='lower right', + bbox_to_anchor=(1, -0.03), + ncol=1, + frameon=False, + handlelength=2.5, + ) + + fig.supylabel("99% wakeup latency", x=0.01) + fig.supxlabel("Number of worker threads", y=0.005) + + plt.tight_layout() + plt.subplots_adjust(top=0.97, bottom=0.18, left=0.13, right=0.97) + plt.savefig(output) + plt.show() + + +if __name__ == "__main__": + plot("schbench", args.output) diff --git a/scripts/plots/plot_synthetic.py b/scripts/plots/plot_synthetic.py new file mode 100644 index 0000000..093978a --- /dev/null +++ b/scripts/plots/plot_synthetic.py @@ -0,0 +1,146 @@ +import os +import argparse +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.ticker import MultipleLocator, AutoMinorLocator +from matplotlib.lines import Line2D + +from common import * + +BAR_COLORS = { + "cfs": COLORS(0), + "skyloft-30us": COLORS(1), + "ghost-30us": COLORS(4), + "shinjuku-30us": COLORS(2), +} +MARKERS = { + "cfs": ".:", + "ghost-30us": "+-", + "shinjuku-30us": "*-", + "skyloft-30us": "x-", +} +LABELS = { + "cfs": "CFS", + "ghost-30us": "ghOSt (30$\mu$s)", + "skyloft-30us": "Skyloft (30$\mu$s)", + "shinjuku-30us": "Shinjuku (30$\mu$s)", +} +FNAMES = [ + "cfs", + "ghost-30us", + "shinjuku-30us", + "skyloft-30us", +] + +def load_result(name): + f = open(name, "r") + results = np.array([line.split(",") for line in f.readlines()]) + throughputs = results[:, 1] + lat_99ths = results[:, -4] + return throughputs, lat_99ths + + +def load_result_be(name): + f = open(name, "r") + results = np.array([line.split(",") for line in f.readlines()]) + throughputs = results[:, 0] + cpu_shares = results[:, -1] + return throughputs, cpu_shares + + +def plot(id, results, output=None): + fig, ax1 = plt.subplots(figsize=(4, 2.5)) + + ax1.set_ylabel("99% Latency ($\mu$s)") + ax1.get_yaxis().set_label_coords(-0.12, 0.5) + ax1.set_xlabel("Throughput (kRPS)") + ax1.grid(which="major", axis="y", linestyle=":", alpha=0.5, zorder=0) + + if id < 2: + ax_xticks = range(0, 351, 100) + ax1.set_xlim(0, 360) + ax1.set_xticks(ax_xticks) + ax1.set_xticklabels([xtick for xtick in ax_xticks]) + ax1.xaxis.set_minor_locator(MultipleLocator(20)) + + ax1_yticks = range(0, 1001, 200) + ax1.set_ylim(-30, 1000) + ax1.set_yticks(ax1_yticks, minor=False) + ax1.set_yticklabels([ytick for ytick in ax1_yticks]) + ax1.yaxis.set_minor_locator(MultipleLocator(100)) + else: + ax1.set_ylabel("Batch CPU Share") + ax_xticks = range(0, 351, 100) + ax1.set_xlim(0, 310) + ax1.set_xticks(ax_xticks) + ax1.set_xticklabels([xtick for xtick in ax_xticks]) + ax1.xaxis.set_minor_locator(MultipleLocator(20)) + + ax1_yticks = [0, 0.2, 0.4, 0.6, 0.8, 1] + ax1.set_ylim(-0.05, 1) + ax1.set_yticks(ax1_yticks, minor=False) + ax1.set_yticklabels([ytick for ytick in ax1_yticks]) + + ax1.text( 100, 0.2, "➊", fontsize=14, va='center', ha='center') + ax1.quiver(110, 0.16, 20, -0.12, scale=1, scale_units='xy', angles='xy', + width=0.004, headwidth=5 + ) + + for e in FNAMES: + ax1.plot( + list(results[e].keys()), + list(results[e].values()), + MARKERS[e], + label=LABELS[e], + zorder=3, + linewidth=1, + markersize=4, + color=BAR_COLORS[e], + ) + + if id == 0: + # Create a unique legend + handles, labels = plt.gca().get_legend_handles_labels() + by_label = dict(zip(labels, handles)) + leg = plt.legend(by_label.values(), by_label.keys(), + loc="best", ncol=1, frameon=False, handlelength=2.5 + ) + leg.get_frame().set_linewidth(0.0) + + plt.tight_layout() + 
plt.subplots_adjust(top=0.97, bottom=0.18, left=0.15, right=0.97) + plt.savefig(output) + plt.show() + + +if __name__ == "__main__": + results = {} + res_dir = os.path.join(RES_DIR, "synthetic", "99.5-4-0.5-10000") + for f in os.listdir(res_dir): + results[f] = {} + thourghputs, lat_99ths = load_result(os.path.join(res_dir, f)) + for throughput, lat_99th in zip(thourghputs, lat_99ths): + results[f][float(throughput) / 1000] = int(lat_99th) / 1000 + plot(0, results, "synthetic-a.pdf") + + results_lc = {} + results_be = {} + res_dir = os.path.join(RES_DIR, "synthetic", "99.5-4-0.5-10000-lcbe") + for f in os.listdir(res_dir): + if "-lc" in f: + thourghputs, lat_99ths = load_result(os.path.join(res_dir, f)) + res = results_lc + values = map(lambda x: int(x) / 1000, lat_99ths) + elif "-be" in f: + thourghputs, cpu_shares = load_result_be(os.path.join(res_dir, f)) + res = results_be + values = map(float, cpu_shares) + + f = f[:-3] + res[f] = {} + print(f) + for throughput, v in zip(thourghputs, values): + print(throughput, v) + res[f][float(throughput) / 1000] = v + plot(1, results_lc, "synthetic-b.pdf") + plot(2, results_be, "synthetic-c.pdf") diff --git a/scripts/plots/requirements.txt b/scripts/plots/requirements.txt new file mode 100644 index 0000000..4b29dc2 --- /dev/null +++ b/scripts/plots/requirements.txt @@ -0,0 +1,2 @@ +matplotlib==3.9.2 +numpy==2.1.0 diff --git a/scripts/run.sh b/scripts/run.sh new file mode 100755 index 0000000..6a9e18d --- /dev/null +++ b/scripts/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +APP=hello + +BIN_DIR=$(dirname "$0")/../build/bin + +if [ ! -z "$1" ]; then + APP=$1 + shift 1; +fi + +sudo rm -rf /dev/shm/skyloft_* /mnt/huge/skyloft_* +sudo ipcrm -a > /dev/null 2>&1 + +sudo ${BIN_DIR}/$APP $@ diff --git a/scripts/run_experiments.sh b/scripts/run_experiments.sh new file mode 100755 index 0000000..80f00d3 --- /dev/null +++ b/scripts/run_experiments.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +APP=shinjuku + +BIN_DIR=$(dirname "$0")/../build/bin + +if [ ! 
-z "$1" ]; then + APP=$1 + shift 1; +fi + +ROCKSDB_DIR=/tmp/skyloft_rocksdb_$(whoami) +OUTPUT_DIR=/tmp/skyloft_experiment_$(whoami) + +mkdir -p ${OUTPUT_DIR} +sudo rm -rf /dev/shm/skyloft_* /mnt/huge/skyloft_* +sudo ipcrm -a > /dev/null 2>&1 + +sudo ${BIN_DIR}/$APP --rocksdb_path=${ROCKSDB_DIR} --output_path=${OUTPUT_DIR} $@ diff --git a/scripts/run_shinjuku.sh b/scripts/run_shinjuku.sh new file mode 100755 index 0000000..b985121 --- /dev/null +++ b/scripts/run_shinjuku.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +APP=shinjuku + +BIN_DIR=$(dirname "$0")/../build/bin + +loads="$(seq 0.1 0.1 0.7) $(seq 0.8 0.02 0.9)" +# loads="$(seq 0.8 0.1 0.8)" + +mkdir -p /tmp/skyloft_synthetic + +for i in $loads; do + echo "Load: $i" + sudo rm -rf /dev/shm/skyloft_* /mnt/huge/skyloft_* + # gdb --args ${BIN_DIR}/$APP --run_time=5 \ + ${BIN_DIR}/$APP --run_time=5 \ + --num_workers=20 \ + --get_service_time=4000 \ + --range_query_service_time=10000000 \ + --range_query_ratio=0.005 \ + --load=$i \ + --fake_work \ + --preemption_quantum=30 \ + --output_path=./data + # --detailed_print +done diff --git a/scripts/run_synthetic_lc.sh b/scripts/run_synthetic_lc.sh new file mode 100755 index 0000000..d8f3fb4 --- /dev/null +++ b/scripts/run_synthetic_lc.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -x + +LC_APP=shinjuku +LC_OUT_FILE=./data-lc +NUM_WORKERS=20 +RUN_TIME=5 + +BIN_DIR=$(dirname "$0")/../build/bin + +loads="$(seq 0.1 0.1 0.7) $(seq 0.8 0.02 0.9)" +# loads="$(seq 0.8 0.1 0.8)" + +rm $LC_OUT_FILE +touch $LC_OUT_FILE +mkdir -p /tmp/skyloft_synthetic + +for i in $loads; do + echo "Load: $i" + sudo rm -rf /dev/shm/skyloft_* /mnt/huge/skyloft_* + sudo ipcrm -a > /dev/null 2>&1 + # gdb --args ${BIN_DIR}/$APP --run_time=5 \ + ${BIN_DIR}/$LC_APP \ + --get_service_time=4000 \ + --range_query_service_time=10000000 \ + --fake_work \ + --load=$i \ + --range_query_ratio=0.005 \ + --preemption_quantum=30 \ + --run_time=$RUN_TIME \ + --num_workers=$NUM_WORKERS \ + --output_path=$LC_OUT_FILE + # --detailed_print +done diff --git a/scripts/run_synthetic_lcbe.sh b/scripts/run_synthetic_lcbe.sh new file mode 100755 index 0000000..3044650 --- /dev/null +++ b/scripts/run_synthetic_lcbe.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# set -x + +LC_APP=shinjuku +LC_OUT_FILE=./data-lc +LC_GUARANTEED_CPUS=4 +BE_APP=antagonist +BE_OUT_FILE=./out-be +BE_DATA_FILE=./data-be +NUM_WORKERS=20 +RUN_TIME=5 + +BIN_DIR=$(dirname "$0")/../build/bin + +loads="$(seq 0.1 0.1 0.7) $(seq 0.8 0.02 0.9)" +# loads="$(seq 0.8 0.1 0.8)" + +mkdir -p /tmp/skyloft_synthetic +rm $LC_OUT_FILE $BE_OUT_FILE $BE_DATA_FILE +touch $LC_OUT_FILE $BE_OUT_FILE $BE_DATA_FILE + +for i in $loads; do + echo "Load: $i" + sudo rm -rf /dev/shm/skyloft_* /mnt/huge/skyloft_* + sudo ipcrm -a > /dev/null 2>&1 + # gdb --args ${BIN_DIR}/$APP --run_time=5 \ + sudo ${BIN_DIR}/$LC_APP \ + --get_service_time=4000 \ + --range_query_service_time=10000000 \ + --fake_work \ + --load=$i \ + --range_query_ratio=0.005 \ + --preemption_quantum=30 \ + --run_time=$RUN_TIME \ + --num_workers=$NUM_WORKERS \ + --guaranteed_cpus=$LC_GUARANTEED_CPUS \ + --output_path=$LC_OUT_FILE & + # --detailed_print + sleep 2 + sudo ${BIN_DIR}/$BE_APP \ + --run_time=$RUN_TIME \ + --num_workers=$NUM_WORKERS \ + --output_path=$BE_OUT_FILE + sleep 2 + + rps=$(tail -n 1 $LC_OUT_FILE | cut -d ',' -f 2) + share=$(tail -n 1 $BE_OUT_FILE) + echo "$rps,$share" + echo "$rps,$share" >> $BE_DATA_FILE +done diff --git a/scripts/setup_host.sh b/scripts/setup_host.sh new file mode 100755 index 0000000..f336319 --- /dev/null +++ 
b/scripts/setup_host.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +echo 0 > /proc/sys/kernel/hung_task_timeout_secs +echo 1 > /sys/module/rcupdate/parameters/rcu_cpu_stall_suppress +echo never > /sys/kernel/mm/transparent_hugepage/enabled +echo never > /sys/kernel/mm/transparent_hugepage/defrag + +chmod -R 777 /dev/hugepages + +echo 8192 | tee /sys/devices/system/node/node*/hugepages/hugepages-2048kB/nr_hugepages +mkdir -p /mnt/huge || true +mount -t hugetlbfs -opagesize=2M nodev /mnt/huge +chmod 777 /mnt/huge + +modprobe uio_pci_generic diff --git a/synthetic/CMakeLists.txt b/synthetic/CMakeLists.txt new file mode 100644 index 0000000..01d5263 --- /dev/null +++ b/synthetic/CMakeLists.txt @@ -0,0 +1,32 @@ +include(${CMAKE_SCRIPTS}/rocksdb.mk) + +set(COMMON_SRCS rocksdb/random.cc rocksdb/common.cc) + +if (UINTR) + if (SCHED_POLICY STREQUAL "sq") + add_executable(shinjuku rocksdb/shinjuku.cc ${COMMON_SRCS}) + target_link_libraries(shinjuku PRIVATE + skyloft + librocksdb + z + gflags + snappy + ) + target_include_directories(shinjuku PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb) + elseif (SCHED_POLICY STREQUAL "sq_lcbe") + add_executable(shinjuku rocksdb/shinjuku.cc ${COMMON_SRCS}) + target_link_libraries(shinjuku PRIVATE + skyloft + librocksdb + z + gflags + snappy + ) + target_include_directories(shinjuku PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb) + + set(ANTAGONIST_SOURCES antagonist/main.cc) + add_executable(antagonist ${ANTAGONIST_SOURCES}) + target_link_libraries(antagonist PRIVATE skyloft gflags) + target_include_directories(antagonist PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb) + endif() +endif() diff --git a/synthetic/antagonist/main.cc b/synthetic/antagonist/main.cc new file mode 100644 index 0000000..72df900 --- /dev/null +++ b/synthetic/antagonist/main.cc @@ -0,0 +1,114 @@ +#include +#include + +#include +#include +#include + +DEFINE_int32(run_time, 5, "Running time (s) of the experiment."); +DEFINE_double(period, 100, "For each period (ms), the worker uses a fixed share of CPU time."); +DEFINE_int32(num_workers, 1, "The number of workers."); +DEFINE_double(work_share, 1.0, + "Each thread tries to target this share of the cycles on a CPU. For " + "example, if 'work_share' is 0.5, each thread tries to target 50%% " + "of cycles on a CPU. Note that 'work_share' must be greater than or " + "equal to 0.0 and less than or equal to 1.0. 
(default: 1.0)"); +DEFINE_string(output_path, "/tmp/skyloft_synthetic", "The path to the experiment results."); + +struct worker_t { + __nsec start; + __nsec usage; + int nth; +} __aligned_cacheline; + +static struct worker_t workers[USED_CPUS]; + +static void synthetic_worker(void *arg) +{ + struct worker_t *worker = (struct worker_t *)arg; + __nsec period = FLAGS_period * NSEC_PER_MSEC; + __nsec share = FLAGS_work_share * period; + __nsec usage, usage_start, finish; + int n; + + if (!worker->start) { + worker->start = now_ns(); + } + + while (1) { + if (now_ns() - worker->start > FLAGS_run_time * NSEC_PER_SEC) + break; + + /* nth period */ + n = (now_ns() - worker->start + period - 1) / period; + if (n <= worker->nth) { + sl_task_yield(); + continue; + } + worker->nth = n; + + finish = worker->start + n * period; + usage = 0; + usage_start = now_ns(); + while (now_ns() < finish && (usage = now_ns() - usage_start) < share); + worker->usage += usage; + + sl_task_yield(); + } +} + +static void write_results() +{ + int i; + __nsec usage = 0; + + printf("Antagonist CPU share:\n"); + for (i = 1; i < FLAGS_num_workers + 1; i++) { + usage += workers[i].usage; + printf("\tWorker %d %.3lf\n", i, (double)workers[i].usage / FLAGS_run_time / NSEC_PER_SEC); + } + printf("\tTotal %.3lf\n", (double)usage / FLAGS_run_time / NSEC_PER_SEC / FLAGS_num_workers); + + FILE *file = fopen(FLAGS_output_path.c_str(), "a"); + fprintf(file, "%.3lf\n", (double)usage / FLAGS_run_time / NSEC_PER_SEC / FLAGS_num_workers); + fclose(file); +} + +static void antagonist_main(void *arg) +{ + int i; + __nsec usage = 0; + + printf("Antagonist %d starts on CPU %d\n", sl_current_app_id(), sl_current_cpu_id()); + + for (i = 0; i < FLAGS_num_workers; i++) { + workers[i].start = 0; + workers[i].usage = 0; + workers[i].nth = -1; + } + + for (i = 1; i < FLAGS_num_workers + 1; i++) { + sl_task_spawn_oncpu(i, synthetic_worker, (void *)&workers[i], 0); + } + + sl_task_yield(); + + write_results(); + + exit(EXIT_SUCCESS); +} + +int main(int argc, char **argv) +{ + gflags::SetUsageMessage("antagonist [options]"); + gflags::ParseCommandLineFlags(&argc, &argv, true); + + if (FLAGS_num_workers > USED_CPUS) { + perror("Too many workers\n"); + } + + gflags::ShutDownCommandLineFlags(); + + printf("Antagonist with %d thread(s)\n", FLAGS_num_workers); + sl_libos_start(antagonist_main, NULL); +} \ No newline at end of file diff --git a/synthetic/rocksdb/common.cc b/synthetic/rocksdb/common.cc new file mode 100644 index 0000000..a921a15 --- /dev/null +++ b/synthetic/rocksdb/common.cc @@ -0,0 +1,411 @@ +#include +#include +#include + +#include +#include + +#include + +#include "common.h" +#include "random.h" +#include "utils/time.h" + +DEFINE_string(rocksdb_path, "/tmp/skyloft_rocksdb", + "The path to the RocksDB database. 
Creates the database if it does not exist."); +DEFINE_int32(rocksdb_cache_size, 1024, "The size of the RocksDB cache in MB."); +DEFINE_double(range_query_ratio, 0.001, "The share of requests that are range queries."); +DEFINE_int32(range_query_size, 500, "The size of range."); +DEFINE_int32(get_service_time, 1000, "The duration of Get requests (ns)."); +DEFINE_int32(range_query_service_time, 1000000, "The duration of Range queries (ns)."); +DEFINE_double(load, 0.75, "The ratio of target throughput to max throughput."); +DEFINE_int32(run_time, 5, "Running time (s) of the experiment."); +DEFINE_int32(discard_time, 2, + "Discards all results from when the experiment starts to discard time (s) elapses."); +DEFINE_string(output_path, "/tmp/skyloft_synthetic", "The path to the experiment results."); +DEFINE_int32(num_workers, 2, "The number of workers."); +DEFINE_bool(bench_request, false, "Benchmark request service time."); +DEFINE_bool(fake_work, false, "Use fake work (spin) instead of real database operations."); +DEFINE_int32(preemption_quantum, 0, + "Turn off time-based preemption by setting the preemption quantum to 0."); +DEFINE_bool(detailed_print, false, "Print detailed experiment results."); +DEFINE_bool(slowdown_print, false, "Print experiment results of request slowdown."); +DEFINE_int32(guaranteed_cpus, 5, "Guranteed number of CPUs when running with batch app."); +DEFINE_int32(adjust_quantum, 20, "Scheduler makes core allocation decision every quantum (us)."); +DEFINE_double(congestion_thresh, 0.05, "Threshold to detect congestion of applications."); + +static void write_percentiles(std::vector &results, FILE *file, bool stdout = false) +{ + if (!results.size()) + return; + + std::sort(results.begin(), results.end()); + uint64_t sum = 0; + for (auto &r : results) sum += r; + + int size = results.size() % 2 == 0 ? 
results.size() - 1 : results.size(); + uint64_t avg = (sum + results.size() / 2) / results.size(); + uint64_t min = results.front(); + uint64_t max = results.back(); + uint64_t p50 = results.at(size * 0.5); + uint64_t p90 = results.at(size * 0.9); + uint64_t p95 = results.at(size * 0.95); + uint64_t p99 = results.at(size * 0.99); + uint64_t p99_5 = results.at(size * 0.995); + uint64_t p99_9 = results.at(size * 0.999); + + fprintf(file, "%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld\n", avg, min, p50, p90, p95, p99, p99_5, + p99_9, max); + + if (stdout) { + printf("\tAvg (us) %.3f\n", (double)avg / NSEC_PER_USEC); + printf("\tMin (us) %.3f\n", (double)min / NSEC_PER_USEC); + printf("\t50%% (us) %.3f\n", (double)p50 / NSEC_PER_USEC); + printf("\t90%% (us) %.3f\n", (double)p90 / NSEC_PER_USEC); + printf("\t95%% (us) %.3f\n", (double)p95 / NSEC_PER_USEC); + printf("\t99%% (us) %.3f\n", (double)p99 / NSEC_PER_USEC); + printf("\t99.5%% (us) %.3f\n", (double)p99_5 / NSEC_PER_USEC); + printf("\t99.9%% (us) %.3f\n", (double)p99_9 / NSEC_PER_USEC); + printf("\tMax (us) %.3f\n", (double)max / NSEC_PER_USEC); + } +} + +void write_lat_results_detailed(int issued, request_t *reqs) +{ + char fname[256]; + sprintf(fname, "%s/rocksdb_%s_%s_%.1f_%.1f_%.3f_%.2f_%d_%ld", FLAGS_output_path.c_str(), + program_invocation_short_name, sl_sched_policy_name(), + (double)FLAGS_get_service_time / NSEC_PER_USEC, + (double)FLAGS_range_query_service_time / NSEC_PER_USEC, FLAGS_range_query_ratio, + FLAGS_load, FLAGS_num_workers, time(NULL)); + FILE *file = fopen(fname, "w"); + assert(file != NULL); + printf("Write results to %s\n", fname); + + std::vector ingress_res; + std::vector queue_res; + std::vector handle_res; + std::vector total_res; + std::vector finished_reqs; + + fprintf(file, "id,type,timestamp,ingress,queue,handle,total\n"); + + uint64_t offset = reqs[0].gen_time; + int completed = 0, _issued = 0; + for (int i = 0; i < issued; i++) { + request_t *req = &reqs[i]; + if (offset + FLAGS_discard_time * NSEC_PER_SEC < req->gen_time) { + _issued++; + /* Some workers may not finish their work */ + if (req->end_time != 0 && req->start_time != 0) { + /* Discard cold results */ + completed++; + finished_reqs.push_back(req); + } + } + } + std::sort(finished_reqs.begin(), finished_reqs.end(), [](request_t *req1, request_t *req2) { + return req1->end_time - req1->gen_time < req2->end_time - req2->gen_time; + }); + for (int i = 0; i < completed; ++i) { + request_t *req = finished_reqs[i]; + + uint64_t ingress_lat = req->recv_time - req->gen_time; + uint64_t queue_lat = req->start_time - req->recv_time; + uint64_t handle_lat = req->end_time - req->start_time; + uint64_t total_lat = req->end_time - req->gen_time; + + ingress_res.push_back(ingress_lat); + queue_res.push_back(queue_lat); + handle_res.push_back(handle_lat); + total_res.push_back(total_lat); + + fprintf(file, "%d,%d,%ld,%ld,%ld,%ld,%ld\n", i, req->type, req->gen_time - offset, + ingress_lat, queue_lat, handle_lat, total_lat); + } + + double actual_tput = (double)completed / (FLAGS_run_time - FLAGS_discard_time); + printf("Results: \n"); + printf("\tMeans service time: %.3lf\n", mean_service_time_us()); + printf("\tMax throughput: %.3lf\n", max_throughput()); + printf("\tTarget throughput: %.3lf\n", target_throughput()); + printf("\tActual thoughput: %.3lf\n", actual_tput); + printf("\tCompleted/Issued %d/%d\n", completed, _issued); + + fprintf(file, "avg,min,p50,p90,p95,p99,p99_5,p99_9,max\n"); + write_percentiles(ingress_res, file); + write_percentiles(queue_res, file); 
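+    /* Only the end-to-end (total) latencies are echoed to stdout; each stage's
+     * percentiles still go to the CSV rows written above and below. */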
+ write_percentiles(handle_res, file); + write_percentiles(total_res, file, true); + + fprintf(file, "max_tput,target_tput,actual_tput,issued,completed\n"); + fprintf(file, "%.3lf,%.3lf,%.3lf,%d,%d\n", max_throughput(), target_throughput(), actual_tput, + _issued, completed); + fclose(file); +} + +void write_lat_results(int issued, request_t *reqs) +{ + + std::vector results; + FILE *file = fopen(FLAGS_output_path.c_str(), "a"); + assert(file != NULL); + uint64_t offset = reqs[0].gen_time; + for (int i = 0; i < issued; i++) { + request_t *req = &reqs[i]; + /* Some workers may not finish their work */ + if (req->end_time != 0 && req->start_time != 0) { + /* Discard cold results */ + if (offset + FLAGS_discard_time * NSEC_PER_SEC < req->gen_time) { + results.push_back(req->end_time - req->gen_time); + } + } + } + if (results.size() > 0) { + double run_time = FLAGS_run_time - FLAGS_discard_time; + double actual_tput = (double)results.size() / run_time; + std::sort(results.begin(), results.end()); + int size = results.size() % 2 == 0 ? results.size() - 1 : results.size(); + uint64_t min = results.front(); + uint64_t p50 = results.at(size * 0.5); + uint64_t p99 = results.at(size * 0.99); + uint64_t p99_5 = results.at(size * 0.995); + uint64_t p99_9 = results.at(size * 0.999); + uint64_t max = results.back(); + fprintf(file, "%.3lf,%.3lf,%lu,%lu,%lu,%lu,%lu,%lu\n", target_throughput(), actual_tput, + min, p50, p99, p99_5, p99_9, max); + } else { + fprintf(file, "%.3lf,0,0,0,0,0,0,0\n", target_throughput()); + } + fclose(file); +} + +void write_slo_results(int issued, request_t *reqs) +{ + + std::vector results; + FILE *file = fopen(FLAGS_output_path.c_str(), "a"); + assert(file != NULL); + uint64_t offset = reqs[0].gen_time; + for (int i = 0; i < issued; i++) { + request_t *req = &reqs[i]; + /* Some workers may not finish their work */ + if (req->end_time != 0 && req->start_time != 0) { + /* Discard cold results */ + if (offset + FLAGS_discard_time * NSEC_PER_SEC < req->gen_time) { + if (req->type == ROCKSDB_GET) { + results.push_back((double)(req->end_time - req->gen_time) / + FLAGS_get_service_time); + } else { + results.push_back((double)(req->end_time - req->gen_time) / + FLAGS_range_query_service_time); + } + } + } + } + if (results.size() > 0) { + double run_time = FLAGS_run_time - FLAGS_discard_time; + double actual_tput = (double)results.size() / run_time; + std::sort(results.begin(), results.end()); + int size = results.size() % 2 == 0 ? 
results.size() - 1 : results.size();
+        double min = results.front();
+        double p50 = results.at(size * 0.5);
+        double p99 = results.at(size * 0.99);
+        double p99_5 = results.at(size * 0.995);
+        double p99_9 = results.at(size * 0.999);
+        double max = results.back();
+        fprintf(file, "%.3lf,%.3lf,%.3lf,%.3lf,%.3lf,%.3lf,%.3lf,%.3lf\n", target_throughput(),
+                actual_tput, min, p50, p99, p99_5, p99_9, max);
+    } else {
+        fprintf(file, "%.3lf,0,0,0,0,0,0,0\n", target_throughput());
+    }
+    fclose(file);
+}
+
+static char *rocksdb_gen_data(uint32_t entry, const char *prefix)
+{
+    char *data;
+
+    /* prefix + zero-padded entry number + terminating NUL */
+    data = (char *)malloc(sizeof(char) * (ROCKSDB_DATA_LENGTH + strlen(prefix) + 1));
+    sprintf(data, "%s%0*u", prefix, ROCKSDB_DATA_LENGTH, entry);
+
+    return data;
+}
+
+rocksdb_t *rocksdb_init(const char *path, int cache_size)
+{
+    char *err = NULL;
+    int i;
+    rocksdb_t *db;
+    rocksdb_options_t *db_options;
+    rocksdb_block_based_table_options_t *table_options;
+    rocksdb_cache_t *block_cache;
+    rocksdb_writeoptions_t *write_options;
+    char *key, *value;
+
+    db_options = rocksdb_options_create();
+    rocksdb_options_set_create_if_missing(db_options, 1);
+    rocksdb_options_set_allow_mmap_reads(db_options, 1);
+    rocksdb_options_set_allow_mmap_writes(db_options, 1);
+    rocksdb_options_set_error_if_exists(db_options, 0);
+    rocksdb_options_set_compression(db_options, rocksdb_no_compression);
+    rocksdb_options_optimize_level_style_compaction(db_options, 512 * 1024 * 1024);
+
+    table_options = rocksdb_block_based_options_create();
+    block_cache = rocksdb_cache_create_lru(cache_size * 1024 * 1024);
+    rocksdb_block_based_options_set_block_cache(table_options, block_cache);
+    rocksdb_options_set_block_based_table_factory(db_options, table_options);
+
+    db = rocksdb_open(db_options, path, &err);
+    assert(!err);
+
+    /* Fill in the database */
+    write_options = rocksdb_writeoptions_create();
+    for (i = 0; i < ROCKSDB_NUM_ENTRIES; ++i) {
+        key = rocksdb_gen_data(i, "key");
+        value = rocksdb_gen_data(i, "value");
+        rocksdb_put(db, write_options, key, ROCKSDB_KEY_LENGTH, value, ROCKSDB_VALUE_LENGTH, &err);
+        assert(!err);
+        free(key), free(value);
+    }
+
+    rocksdb_block_based_options_destroy(table_options);
+    rocksdb_cache_destroy(block_cache);
+    rocksdb_writeoptions_destroy(write_options);
+    rocksdb_options_destroy(db_options);
+    return db;
+}
+
+void rocksdb_handle_get(rocksdb_t *db, request_t *req)
+{
+    rocksdb_readoptions_t *readoptions;
+    size_t len;
+    char *err = NULL;
+    char *key;
+
+    assert(req->type == ROCKSDB_GET);
+
+    readoptions = rocksdb_readoptions_create();
+    key = rocksdb_gen_data(req->work.get.entry, "key");
+    rocksdb_get(db, readoptions, key, ROCKSDB_KEY_LENGTH, &len, &err);
+    assert(!err);
+    rocksdb_readoptions_destroy(readoptions);
+    free(key);
+}
+
+void rocksdb_handle_range_query(rocksdb_t *db, request_t *req)
+{
+    rocksdb_readoptions_t *readoptions;
+    size_t len;
+    char *err = NULL;
+    rocksdb_iterator_t *iter;
+    size_t i;
+    uint32_t start_entry;
+    char *key, *value;
+    const char *result;
+
+    assert(req->type == ROCKSDB_RANGE);
+
+    start_entry = req->work.range.start;
+    readoptions = rocksdb_readoptions_create();
+    iter = rocksdb_create_iterator(db, readoptions);
+    key = rocksdb_gen_data(start_entry, "key");
+    rocksdb_iter_seek(iter, key, ROCKSDB_KEY_LENGTH);
+    for (i = 0; i < req->work.range.size; ++i) {
+        assert(rocksdb_iter_valid(iter));
+        value = rocksdb_gen_data(start_entry + i, "value");
+        result = rocksdb_iter_value(iter, &len);
+
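+        /* The value read back through the iterator must match the value generated
+         * for this key when the database was populated. */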
assert(!strcmp(result, value)); + rocksdb_iter_next(iter); + free(value); + } + rocksdb_readoptions_destroy(readoptions); + free(key); +} + +void benchmark_request(rocksdb_t *db) +{ + int i; + request_t req; + const int get_times = 10000; + const int range_query_times = 1000; + double get_avg, range_avg; + __nsec start; + + get_avg = 0; + req.type = ROCKSDB_GET; + for (i = 0; i < get_times; i++) { + if (FLAGS_fake_work) { + start = now_ns(); + fake_work(FLAGS_get_service_time); + get_avg += (double)(now_ns() - start) / NSEC_PER_USEC; + } else { + req.work.get.entry = random_int_uniform_distribution(0, ROCKSDB_NUM_ENTRIES); + req.start_time = now_ns(); + rocksdb_handle_get(db, &req); + req.end_time = now_ns(); + get_avg += (double)(req.end_time - req.start_time) / NSEC_PER_USEC; + } + } + printf(" GET Avg (us) %.3f\n", get_avg / get_times); + + range_avg = 0; + req.type = ROCKSDB_RANGE; + for (i = 0; i < range_query_times; i++) { + if (FLAGS_fake_work) { + start = now_ns(); + fake_work(FLAGS_range_query_service_time); + range_avg += (double)(now_ns() - start) / NSEC_PER_USEC; + } else { + req.work.range.size = FLAGS_range_query_size; + req.work.range.start = + random_int_uniform_distribution(0, ROCKSDB_NUM_ENTRIES - req.work.range.size); + req.start_time = now_ns(); + rocksdb_handle_range_query(db, &req); + req.end_time = now_ns(); + range_avg += (double)(req.end_time - req.start_time) / NSEC_PER_USEC; + } + } + printf(" RANGE Avg (us) %.3f\n", range_avg / range_query_times); +} + +void init_request(request_t *req) +{ + req->recv_time = 0; + req->start_time = 0; + req->end_time = 0; + req->type = ROCKSDB_GET; + req->work.get.entry = random_int_uniform_distribution(0, ROCKSDB_NUM_ENTRIES - 1); +} + +void init_request_bimodal(request_t *req, double ratio, int size) +{ + bool range_query = false; + + req->recv_time = 0; + req->start_time = 0; + req->end_time = 0; + + range_query = random_bernouli_distribution(ratio); + if (range_query) { + req->type = ROCKSDB_RANGE; + req->work.range.size = size; + req->work.range.start = + random_int_uniform_distribution(0, ROCKSDB_NUM_ENTRIES - req->work.range.size); + } else { + req->type = ROCKSDB_GET; + req->work.get.entry = random_int_uniform_distribution(0, ROCKSDB_NUM_ENTRIES - 1); + } +} + +void fake_work(uint64_t service_time) +{ + uint64_t i = 0, n = service_time * CPU_FREQ_GHZ; + do { + asm volatile("nop"); + i++; + } while (i < n); +} diff --git a/synthetic/rocksdb/common.h b/synthetic/rocksdb/common.h new file mode 100644 index 0000000..1ccbded --- /dev/null +++ b/synthetic/rocksdb/common.h @@ -0,0 +1,97 @@ +#ifndef _SKYLOFT_EXPERIMENT_COMMON_H_ +#define _SKYLOFT_EXPERIMENT_COMMON_H_ + +#include + +#include +#include + +#include +#include + +DECLARE_string(rocksdb_path); +DECLARE_int32(rocksdb_cache_size); +DECLARE_double(range_query_ratio); +DECLARE_int32(range_query_size); +DECLARE_int32(get_service_time); +DECLARE_int32(range_query_service_time); +DECLARE_double(load); +DECLARE_int32(run_time); +DECLARE_int32(discard_time); +DECLARE_string(output_path); +DECLARE_int32(num_workers); +DECLARE_bool(bench_request); +DECLARE_bool(fake_work); +DECLARE_int32(preemption_quantum); +DECLARE_bool(detailed_print); +DECLARE_bool(slowdown_print); +DECLARE_int32(guaranteed_cpus); +DECLARE_int32(adjust_quantum); +DECLARE_double(congestion_thresh); + +enum { + ROCKSDB_GET, + ROCKSDB_RANGE, +}; + +typedef struct { + uint8_t type; + union { + struct { + uint32_t entry; + } get; + struct { + uint32_t start; + uint32_t size; + } range; + } work; + /* When the 
request is generated. */ + __nsec gen_time; + /* When the request is received. */ + __nsec recv_time; + /* When the request is assigned to a worker. */ + __nsec assigned_time; + /* When the request started to be handled by a worker. */ + __nsec start_time; + /* When the worker finished handling the request. */ + __nsec end_time; + struct list_node link; +} request_t; + +static inline double mean_service_time_us() +{ + return (double)FLAGS_get_service_time / NSEC_PER_USEC * (1 - FLAGS_range_query_ratio) + + (double)FLAGS_range_query_service_time / NSEC_PER_USEC * FLAGS_range_query_ratio; +} + +static inline double max_throughput() +{ + return USEC_PER_SEC / mean_service_time_us() * FLAGS_num_workers; +} + +static inline double target_throughput() { return max_throughput() * FLAGS_load; } + +void write_lat_results_detailed(int issued, request_t *reqs); +void write_lat_results(int issued, request_t *reqs); +void write_slo_results(int issued, request_t *reqs); + +#define ROCKSDB_NUM_ENTRIES 1000000 +#define ROCKSDB_DATA_LENGTH 16 +#define ROCKSDB_KEY_LENGTH (3 + ROCKSDB_DATA_LENGTH) +#define ROCKSDB_VALUE_LENGTH (5 + ROCKSDB_DATA_LENGTH) + +rocksdb_t *rocksdb_init(const char *path, int cache_size); +void rocksdb_handle_get(rocksdb_t *db, request_t *req); +void rocksdb_handle_range_query(rocksdb_t *db, request_t *req); +void benchmark_request(rocksdb_t *db); +void init_request(request_t *req); +void init_request_file(request_t *req); +void init_request_bimodal(request_t *req, double ratio, int size); +void fake_work(uint64_t service_time); + +/* partitioned-FCFS */ +#define MQ 1 + +#define CPU_FREQ_GHZ 2 + +#endif diff --git a/synthetic/rocksdb/jbsq.h b/synthetic/rocksdb/jbsq.h new file mode 100644 index 0000000..4b4ba58 --- /dev/null +++ b/synthetic/rocksdb/jbsq.h @@ -0,0 +1,23 @@ +#ifndef _SKYLOFT_EXPERIMENT_JBSQ_H_ +#define _SKYLOFT_EXPERIMENT_JBSQ_H_ + +/* Join-Bounded-Shortest-Queue Policy */ + +#define JBSQ_K 2 +#define JBSQ_MASK (JBSQ_K - 1) + +#define JBSQ(type, name) \ + struct { \ + type _data[JBSQ_K]; \ + int _head, _tail; \ + } name +#define JBSQ_INIT(q) ((q)->_head = (q)->_tail = 0) +#define JBSQ_DATA(q) ((q)->_data) + +#define JBSQ_LEN(q) ((q)->_tail - (q)->_head) +#define JBSQ_EMPTY(q) ((q)->_tail == (q)->_head) +#define JBSQ_FULL(q) ((q)->_tail == ((q)->_head + JBSQ_K)) +#define JBSQ_PUSH(q, e) ((q)->_data[((q)->_tail++) & JBSQ_MASK] = (e)) +#define JBSQ_POP(q) ((q)->_data[((q)->_head++) & JBSQ_MASK]) + +#endif \ No newline at end of file diff --git a/synthetic/rocksdb/native.cc b/synthetic/rocksdb/native.cc new file mode 100644 index 0000000..b1f87d1 --- /dev/null +++ b/synthetic/rocksdb/native.cc @@ -0,0 +1,301 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "random.h" + +#define SQ 1 +// #define MQ 1 + +enum { + IDLE, + RUNNING, + FINISHED, + PREEMPTED, +}; + +typedef struct { + /* Bound to an isolated CPU */ + int cpu_id; + /* Database handle */ + rocksdb_t *db; + /* Pointer to dispatcher */ + void *dispatcher; + /* Worker running status */ + int status; +#ifdef MQ + /* When a new request arrives */ + __nsec next; + /* Track allocated requests */ + request_t *requests; + list_head req_list; + int issued; +#endif +} __aligned(CACHE_LINE_SIZE) worker_t; + +typedef struct { +#ifdef SQ + /* When a new request arrives */ + __nsec next; + /* Track allocated requests */ + request_t *requests; + int issued; + 
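+    /* Requests that have "arrived" but are not yet claimed by a worker; guarded by req_lock. */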
list_head req_list; + spinlock_t req_lock; +#endif + /* Track worker status */ + worker_t *workers; +} dispatcher_t; + +dispatcher_t *dispatcher_create(rocksdb_t *db); +void dispatcher_destroy(dispatcher_t *dispatcher); +void do_dispatching(dispatcher_t *dispatcher); + +dispatcher_t *dispatcher_create(rocksdb_t *db) +{ + int i, j; + dispatcher_t *dispatcher; + request_t *req; + + int target_tput = target_throughput(); + int num_reqs = target_tput * FLAGS_run_time * 2; + + dispatcher = (dispatcher_t *)malloc(sizeof(dispatcher_t)); +#ifdef SQ + dispatcher->next = 0; + dispatcher->requests = (request_t *)malloc(sizeof(request_t) * num_reqs); + + double timestamp = 0; + for (i = 0; i < num_reqs; i++) { + timestamp += random_exponential_distribution(); + init_request_bimodal(&dispatcher->requests[i], FLAGS_range_query_ratio, + FLAGS_range_query_size); + dispatcher->requests[i].gen_time = timestamp * NSEC_PER_USEC; + } + list_head_init(&dispatcher->req_list); + spin_lock_init(&dispatcher->req_lock); + dispatcher->issued = 0; +#endif + dispatcher->workers = (worker_t *)malloc(sizeof(worker_t) * FLAGS_num_workers); + for (i = 0; i < FLAGS_num_workers; i++) { + dispatcher->workers[i].cpu_id = i; + dispatcher->workers[i].db = db; + dispatcher->workers[i].dispatcher = (void *)dispatcher; + dispatcher->workers[i].status = IDLE; +#ifdef MQ + dispatcher->workers[i].requests = (request_t *)malloc(sizeof(request_t) * num_reqs); + for (j = 0; j < num_reqs; j++) init_request(&dispatcher->workers[i].requests[j]); + dispatcher->workers[i].issued = 0; + dispatcher->workers[i].next = 0; + list_head_init(&dispatcher->workers[i].req_list); +#endif + } + + return dispatcher; +} + +void dispatcher_destroy(dispatcher_t *dispatcher) +{ + int i; + + free(dispatcher->workers); +#ifdef SQ + free(dispatcher->requests); +#elif MQ + for (i = 0; i < FLAGS_num_cpus; i++) free(dispatcher->workers[i].requests); +#endif + free(dispatcher); +} + +#ifdef SQ +void poll_synthetic_network(dispatcher_t *dispatcher, __nsec start_time) +{ + spin_lock(&dispatcher->req_lock); + request_t *req = &dispatcher->requests[dispatcher->issued]; + + if (now_ns() < start_time + req->gen_time) { + spin_unlock(&dispatcher->req_lock); + return; + } + + req->gen_time += start_time; + req->recv_time = now_ns(); + dispatcher->issued++; + + list_add_tail(&dispatcher->req_list, &req->link); + spin_unlock(&dispatcher->req_lock); +} +#elif MQ +void poll_synthetic_network(dispatcher_t *dispatcher) +{ + request_t *req; + worker_t *worker = &dispatcher->workers[sl_current_cpu_id()]; + + if (now_ns() < worker->next) + return; + + // Avoid issues due to double precision; random generator might be slow + worker->next = now_ns() + NSEC_PER_USEC * random_exponential_distribution(); + + req = &worker->requests[worker->issued++]; + list_add_tail(&worker->req_list, &req->link); + req->recv_time = now_ns(); +} +#endif + +/* Run-to-complete request handler */ +static void* request_handler(void *arg) +{ + request_t *req; + worker_t *worker = (worker_t *)arg; + dispatcher_t *dispatcher = (dispatcher_t *)worker->dispatcher; + +#ifdef SQ + spin_lock(&dispatcher->req_lock); + req = list_pop(&dispatcher->req_list, request_t, link); + spin_unlock(&dispatcher->req_lock); +#elif MQ + req = list_pop(&worker->req_list, request_t, link); +#endif + + if (req == NULL) { + worker->status = IDLE; + return 0; + } + + req->start_time = now_ns(); + if (req->type == ROCKSDB_GET) { + rocksdb_handle_get(worker->db, req); + } else if (req->type == ROCKSDB_RANGE) { + 
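+        /* Range queries scan req->work.range.size consecutive keys, so they run much
+         * longer than point GETs. */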
rocksdb_handle_range_query(worker->db, req); + } + req->end_time = now_ns(); + + worker->status = FINISHED; + return 0; +} + +static __nsec global_start; + +static void* worker_percpu(void *arg) +{ + request_t *req; + worker_t *worker = (worker_t *)arg; + dispatcher_t *dispatcher = (dispatcher_t *)worker->dispatcher; + assert(worker != NULL); + + printf("Worker %d is running ...\n", worker->cpu_id); + + __nsec start = now_ns(); + __nsec end = now_ns() + FLAGS_run_time * NSEC_PER_SEC; + while (now_ns() < end) { + poll_synthetic_network(dispatcher, start); + + worker->status = RUNNING; + sl_task_spawn(request_handler, (void *)worker, 0); + while (worker->status == RUNNING) sl_task_yield(); + } + return 0; +} + +void do_dispatching(dispatcher_t *dispatcher) +{ + for (int i = 1; i < FLAGS_num_workers; i++) { + sl_task_spawn_oncpu(i, worker_percpu, (void *)&dispatcher->workers[i], 0); + } + worker_percpu((void *)&dispatcher->workers[0]); +} + +static void* experiment_main(void *arg) +{ + rocksdb_t *db; + worker_t *curr_worker; + dispatcher_t *dispatcher; + int i, j; + + if (FLAGS_load < 0 || FLAGS_load > 1) { + printf("Invalid load: %f\n", FLAGS_load); + return (void *)-EINVAL; + } + if (FLAGS_get_service_time < 0 || FLAGS_get_service_time > 1000) { + printf("Invalid get_service_time: %f\n", FLAGS_get_service_time); + return (void *)-EINVAL; + } + + printf("Experiment running on CPU %d, num workers: %d\n", sl_current_cpu_id(), + FLAGS_num_workers); + + // TODO: multiple load dispatchers + printf("RocksDB path: %s\n", FLAGS_rocksdb_path.c_str()); + printf("Initializing RocksDB...\n"); + db = rocksdb_init(FLAGS_rocksdb_path.c_str(), FLAGS_rocksdb_cache_size); + if (FLAGS_bench_request) + benchmark_request(db); + + // TODO: RQ might be drained + random_init(); + double mean_arrive_time_us = 1e6 / target_throughput(); + random_exponential_distribution_init(1.0 / mean_arrive_time_us); + + printf("Initializing load dispatcher...\n"); + dispatcher = dispatcher_create(db); + + __nsec start = now_ns(); + for (i = 0; i < 10000; i++) random_exponential_distribution(); + printf("Benchmarking random generator: %.3f ns\n", (double)(now_ns() - start) / 10000); + + printf("Generating requests...\n"); + do_dispatching(dispatcher); + +#ifdef SQ + write_lat_results_detailed(dispatcher->issued, dispatcher->requests); +#elif MQ + int issued = 0; + int num_reqs = target_throughput() * FLAGS_run_time * FLAGS_num_cpus; + request_t *requests = (request_t *)malloc(sizeof(request_t) * num_reqs); + for (i = 0; i < FLAGS_num_cpus; i++) + for (j = 0; j < dispatcher->workers[i].issued; j++) { + if (issued > num_reqs) + break; + requests[issued++] = dispatcher->workers[i].requests[j]; + } + write_lat_results_detailed(issued, requests); +#endif + dispatcher_destroy(dispatcher); + rocksdb_close(db); + printf("Experiment exits gracefully.\n"); + return 0; +} + +int main(int argc, char **argv) +{ + gflags::SetUsageMessage("test_rocksdb [options]"); + gflags::ParseCommandLineFlags(&argc, &argv, true); + gflags::ShutDownCommandLineFlags(); + + if (FLAGS_num_workers > USED_CPUS) { + printf("Too many CPUs %d > %d\n", FLAGS_num_workers, USED_CPUS); + return 0; + } + + sl_libos_start(experiment_main, NULL); +} diff --git a/synthetic/rocksdb/random.cc b/synthetic/rocksdb/random.cc new file mode 100644 index 0000000..7c5583f --- /dev/null +++ b/synthetic/rocksdb/random.cc @@ -0,0 +1,47 @@ +#include + +#include "random.h" + +static std::mt19937 rng; + +void random_init(void) +{ + srand(time(NULL)); + rng.seed(std::random_device()()); +} + 
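+/*
+ * Usage sketch (assumed; mirrors the dispatchers in native.cc and shinjuku.cc):
+ * open-loop arrivals are generated by drawing exponential inter-arrival gaps at
+ * the target rate and accumulating them into absolute timestamps:
+ *
+ *   random_init();
+ *   random_exponential_distribution_init(1.0 / mean_arrive_time_us);
+ *   double t_us = 0;
+ *   for (int i = 0; i < num_reqs; i++) {
+ *       t_us += random_exponential_distribution();
+ *       reqs[i].gen_time = t_us * NSEC_PER_USEC;
+ *   }
+ */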
+int random_int_uniform_distribution(int start, int end) +{ + std::uniform_int_distribution dist(start, end); + return dist(rng); +} + +double random_real_uniform_distribution(double start, double end) +{ + std::uniform_real_distribution dist(start, end); + return dist(rng); +} + +int random_int_bionomial_distribution(int start, int end) +{ + std::binomial_distribution dist(start, end); + return dist(rng); +} + +bool random_bernouli_distribution(double p) +{ + std::bernoulli_distribution dist(p); + return dist(rng); +} + +static std::exponential_distribution exp_dist; + +void random_exponential_distribution_init(double lambda) +{ + exp_dist = std::exponential_distribution(lambda); +} + +double random_exponential_distribution(void) +{ + return exp_dist(rng); +} \ No newline at end of file diff --git a/synthetic/rocksdb/random.h b/synthetic/rocksdb/random.h new file mode 100644 index 0000000..d4b51ef --- /dev/null +++ b/synthetic/rocksdb/random.h @@ -0,0 +1,13 @@ +#ifndef _SKYLOFT_EXPERIMENT_RANDOM_H_ +#define _SKYLOFT_EXPERIMENT_RANDOM_H_ + +void random_init(void); + +int random_int_uniform_distribution(int start, int end); +double random_real_uniform_distribution(double start, double end); +int random_int_bionomial_distribution(int start, int end); +bool random_bernouli_distribution(double p); +void random_exponential_distribution_init(double lambda); +double random_exponential_distribution(void); + +#endif \ No newline at end of file diff --git a/synthetic/rocksdb/shinjuku.cc b/synthetic/rocksdb/shinjuku.cc new file mode 100644 index 0000000..3637c1c --- /dev/null +++ b/synthetic/rocksdb/shinjuku.cc @@ -0,0 +1,202 @@ +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "random.h" + +typedef struct { + request_t *requests; + int issued; +} dispatcher_t; + +typedef struct { + int num_workers; + int preemption_quantum; + int guaranteed_cpus; + int adjust_quantum; + double congestion_thresh; +} params_t; + +static dispatcher_t *g_dispatcher; +static rocksdb_t *g_db; + +dispatcher_t *dispatcher_create(void); +void dispatcher_destroy(dispatcher_t *dispatcher); +void do_dispatching(dispatcher_t *dispatcher); + +dispatcher_t *dispatcher_create(void) +{ + int i; + dispatcher_t *dispatcher; + request_t *req; + bool range_query = false; + + int target_tput = target_throughput(); + int num_reqs = target_tput * FLAGS_run_time * 2; + + dispatcher = (dispatcher_t *)malloc(sizeof(dispatcher_t)); + dispatcher->requests = (request_t *)malloc(sizeof(request_t) * num_reqs); + + double timestamp = 0; + for (i = 0; i < num_reqs; i++) { + timestamp += random_exponential_distribution(); + init_request_bimodal(&dispatcher->requests[i], FLAGS_range_query_ratio, + FLAGS_range_query_size); + dispatcher->requests[i].gen_time = timestamp * NSEC_PER_USEC; + } + dispatcher->issued = 0; + + return dispatcher; +} + +void dispatcher_destroy(dispatcher_t *dispatcher) +{ + free(dispatcher->requests); + free(dispatcher); +} + +static inline request_t *poll_synthetic_network(dispatcher_t *dispatcher, __nsec start_time) +{ + request_t *req = &dispatcher->requests[dispatcher->issued]; + + if (now_ns() < start_time + req->gen_time) + return NULL; + + req->gen_time += start_time; + req->recv_time = now_ns(); + dispatcher->issued++; + return req; +} + +/* Run-to-complete request handler */ +static void worker_request_handler(void *arg) +{ + request_t *req = (request_t *)arg; + + 
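+    /* Run-to-completion: stamp the start, do the GET/RANGE work (real RocksDB calls,
+     * or a calibrated spin when --fake_work is set), then stamp the end. */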
req->start_time = now_ns(); + if (FLAGS_fake_work) { + if (req->type == ROCKSDB_GET) { + fake_work(FLAGS_get_service_time); + } else if (req->type == ROCKSDB_RANGE) { + fake_work(FLAGS_range_query_service_time); + } + } else { + if (req->type == ROCKSDB_GET) { + rocksdb_handle_get(g_db, req); + } else if (req->type == ROCKSDB_RANGE) { + rocksdb_handle_range_query(g_db, req); + } + } + req->end_time = now_ns(); +} + +void do_dispatching(dispatcher_t *dispatcher) +{ + request_t *req; + __nsec start, end; + + sl_sched_poll(); + + start = now_ns(); + end = now_ns() + FLAGS_run_time * NSEC_PER_SEC; + while (now_ns() < end) { + req = poll_synthetic_network(dispatcher, start); + if (req) + sl_task_spawn(worker_request_handler, req, 0); + + sl_sched_poll(); + } +} + +static void experiment_main(void *arg) +{ + int i; + params_t params; + + if (FLAGS_load < 0) { + printf("Invalid load: %f\n", FLAGS_load); + return; + } + if (FLAGS_get_service_time < 0 || (double)FLAGS_get_service_time > 1000 * NSEC_PER_USEC) { + printf("Invalid get_service_time: %f\n", (double)FLAGS_get_service_time / NSEC_PER_USEC); + return; + } + + params.num_workers = FLAGS_num_workers; + params.preemption_quantum = FLAGS_preemption_quantum; + params.guaranteed_cpus = FLAGS_guaranteed_cpus; + params.adjust_quantum = FLAGS_adjust_quantum; + params.congestion_thresh = FLAGS_congestion_thresh; + if (sl_sched_set_params((void *)¶ms) < 0) { + printf("Invalid params\n"); + return; + } + + printf("Dispatcher running on CPU %d, num workers: %d\n", sl_current_cpu_id(), + FLAGS_num_workers); + + if (!FLAGS_fake_work) { + printf("RocksDB path: %s\n", FLAGS_rocksdb_path.c_str()); + printf("Initializing RocksDB...\n"); + g_db = rocksdb_init(FLAGS_rocksdb_path.c_str(), FLAGS_rocksdb_cache_size); + } + if (FLAGS_bench_request) + benchmark_request(g_db); + + random_init(); + double mean_arrive_time_us = 1e6 / target_throughput(); + random_exponential_distribution_init(1.0 / mean_arrive_time_us); + + printf("Initializing load dispatcher...\n"); + g_dispatcher = dispatcher_create(); + + printf("Generating requests...\n"); + do_dispatching(g_dispatcher); + + if (FLAGS_detailed_print) + write_lat_results_detailed(g_dispatcher->issued, g_dispatcher->requests); + else if (FLAGS_slowdown_print) + write_slo_results(g_dispatcher->issued, g_dispatcher->requests); + else + write_lat_results(g_dispatcher->issued, g_dispatcher->requests); + + sl_dump_tasks(); + sl_task_yield(); + + dispatcher_destroy(g_dispatcher); + if (!FLAGS_fake_work) + rocksdb_close(g_db); + printf("Experiment exits gracefully.\n"); + + exit(EXIT_SUCCESS); +} + +int main(int argc, char **argv) +{ + gflags::SetUsageMessage("test_rocksdb [options]"); + gflags::ParseCommandLineFlags(&argc, &argv, true); + gflags::ShutDownCommandLineFlags(); + + if (!strstr(sl_sched_policy_name(), "sq")) { + printf("Must use single_queue scheduler\n"); + return 0; + } + + sl_libos_start(experiment_main, NULL); +} diff --git a/synthetic/rocksdb/shinjuku_old.cc b/synthetic/rocksdb/shinjuku_old.cc new file mode 100644 index 0000000..1efa8e6 --- /dev/null +++ b/synthetic/rocksdb/shinjuku_old.cc @@ -0,0 +1,311 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "jbsq.h" +#include "random.h" + +enum { + IDLE, + RUNNING, + FINISHED, + PREEMPTED, +}; + +typedef struct { +} jbsq_t; + +typedef struct { + /* Bound to an isolated CPU */ + int cpu_id; 
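+    /* Request staging differs by build: a locked per-worker list (MQ), a bounded
+     * join-bounded-shortest-queue ring (JBSQ_K > 1), or a single request slot. */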
+#ifdef MQ + list_head reqs; + spinlock_t req_lock; +#elif JBSQ_K > 1 + /* Requests to handle */ + JBSQ(request_t *, reqs); +#else + request_t *req; +#endif + /* Database handle */ + rocksdb_t *db; + /* Worker running status */ + int status; +} __aligned(CACHE_LINE_SIZE) worker_t; + +typedef struct { + /* When a new request arrives */ + __nsec next; + /* Track allocated requests */ + request_t *requests; + int issued; + list_head req_list; + /* Track worker status */ + worker_t *workers; +} dispatcher_t; + +dispatcher_t *dispatcher_create(rocksdb_t *db); +void dispatcher_destroy(dispatcher_t *dispatcher); +void do_dispatching(dispatcher_t *dispatcher); + +dispatcher_t *dispatcher_create(rocksdb_t *db) +{ + int i; + dispatcher_t *dispatcher; + request_t *req; + bool range_query = false; + + int target_tput = target_throughput(); + int num_reqs = target_tput * FLAGS_run_time * 2; + + dispatcher = (dispatcher_t *)malloc(sizeof(dispatcher_t)); + dispatcher->requests = (request_t *)malloc(sizeof(request_t) * num_reqs); + + double timestamp = 0; + for (i = 0; i < num_reqs; i++) { + timestamp += random_exponential_distribution(); + init_request_bimodal(&dispatcher->requests[i], FLAGS_range_query_ratio, + FLAGS_range_query_size); + dispatcher->requests[i].gen_time = timestamp * NSEC_PER_USEC; + } + list_head_init(&dispatcher->req_list); + dispatcher->issued = 0; + dispatcher->workers = (worker_t *)malloc(sizeof(worker_t) * (FLAGS_num_workers + 1)); + for (i = 1; i < FLAGS_num_workers + 1; i++) { + dispatcher->workers[i].cpu_id = i; + dispatcher->workers[i].status = IDLE; + dispatcher->workers[i].db = db; +#ifdef MQ + list_head_init(&dispatcher->workers[i].reqs); + spin_lock_init(&dispatcher->workers[i].req_lock); +#elif JBSQ_K > 1 + JBSQ_INIT(&dispatcher->workers[i].reqs); +#endif + } + + return dispatcher; +} + +void dispatcher_destroy(dispatcher_t *dispatcher) +{ + free(dispatcher->workers); + free(dispatcher->requests); + free(dispatcher); +} + +void poll_synthetic_network(dispatcher_t *dispatcher, __nsec start_time) +{ + int i; + request_t *req = &dispatcher->requests[dispatcher->issued]; + + if (now_ns() < start_time + req->gen_time) + return; + + req->gen_time += start_time; + req->recv_time = now_ns(); + dispatcher->issued++; + +#ifdef MQ + // Choose a worker queue at random + i = (rand() % FLAGS_num_workers) + 1; + spin_lock(&dispatcher->workers[i].req_lock); + list_add_tail(&dispatcher->workers[i].reqs, &req->link); + spin_unlock(&dispatcher->workers[i].req_lock); + // printf("PUSH %d %p\n", i, req); +#else + list_add_tail(&dispatcher->req_list, &req->link); +#endif +} + +/* Run-to-complete request handler */ +static void worker_request_handler(void *arg) +{ + request_t *req; + worker_t *worker = (worker_t *)arg; + assert(worker != NULL && sl_current_cpu_id() == worker->cpu_id); + +#ifdef MQ + spin_lock(&worker->req_lock); + req = list_pop(&worker->reqs, request_t, link); + spin_unlock(&worker->req_lock); + // printf("POP %d %p\n", sl_current_cpu_id(), req); +#elif JBSQ_K > 1 + req = JBSQ_POP(&worker->reqs); +#else + req = worker->req; +#endif + + req->start_time = now_ns(); + if (req->type == ROCKSDB_GET) { + rocksdb_handle_get(worker->db, req); + } else if (req->type == ROCKSDB_RANGE) { + rocksdb_handle_range_query(worker->db, req); + } + req->end_time = now_ns(); + + worker->status = FINISHED; + // printf("START %lu END %lu\n", req->start_time, req->end_time); +} + +#if JBSQ_K > 1 && !defined(MQ) +static inline void choose_shortest(dispatcher_t *dispatcher) +{ + int i, min_i, len, 
min_len; + request_t *req; + + min_len = INT_MAX; + for (i = 1; i < FLAGS_num_workers + 1; i++) { + len = JBSQ_LEN(&dispatcher->workers[i].reqs); + if (len < min_len && len < JBSQ_K) { + min_len = len; + min_i = i; + } + } + + if (min_len == INT_MAX) + return; + + req = list_pop(&dispatcher->req_list, request_t, link); + if (req != NULL) + JBSQ_PUSH(&dispatcher->workers[min_i].reqs, req); +} +#endif + +static inline void handle_worker(dispatcher_t *dispatcher, int i) +{ + request_t *req; + worker_t *worker = &dispatcher->workers[i]; + +#ifdef MQ + if (worker->status != RUNNING && !list_empty(&worker->reqs)) { + worker->status = RUNNING; + sl_task_spawn_oncpu(i, worker_request_handler, (void *)worker, 0); + } +#elif JBSQ_K > 1 + if (worker->status != RUNNING && !JBSQ_EMPTY(&worker->reqs)) { + worker->status = RUNNING; + sl_task_spawn_oncpu(i, worker_request_handler, (void *)worker, 0); + } +#else + if (worker->status != RUNNING) { + worker->req = list_pop(&dispatcher->req_list, request_t, link); + if (worker->req != NULL) { + worker->status = RUNNING; + sl_task_spawn_oncpu(i, worker_request_handler, (void *)worker, 0); + } + } +#endif +} + +void do_dispatching(dispatcher_t *dispatcher) +{ + int i; + request_t *req; + worker_t *worker; + __nsec start; + int min_len, min_i; + + start = now_ns(); + + for (;;) { + poll_synthetic_network(dispatcher, start); +#if JBSQ_K > 1 && !defined(MQ) + choose_shortest(dispatcher); +#endif + for (i = 1; i < FLAGS_num_workers + 1; ++i) handle_worker(dispatcher, i); + + /* Terminate all workers */ + if (now_ns() - start > FLAGS_run_time * NSEC_PER_SEC) + break; + } + printf("Dispatcher %d\n", dispatcher->issued); +} + +static void experiment_main(void *arg) +{ + rocksdb_t *db; + worker_t *curr_worker; + dispatcher_t *dispatcher; + result_t result; + FILE *output; + int i; + + if (FLAGS_load < 0 || FLAGS_load > 1) { + printf("Invalid load: %f\n", FLAGS_load); + return; + } + if (FLAGS_get_service_time < 0 || FLAGS_get_service_time > 1000) { + printf("Invalid get_service_time: %f\n", FLAGS_get_service_time); + return; + } + + printf("Dispatcher running on CPU %d, num workers: %d\n", sl_current_cpu_id(), + FLAGS_num_workers); + + // TODO: multiple load dispatchers + printf("RocksDB path: %s\n", FLAGS_rocksdb_path.c_str()); + printf("Initializing RocksDB...\n"); + db = rocksdb_init(FLAGS_rocksdb_path.c_str(), FLAGS_rocksdb_cache_size); + if (FLAGS_bench_request) + benchmark_request(db); + + // TODO: RQ might be drained + random_init(); + double mean_arrive_time_us = 1e6 / target_throughput(); + random_exponential_distribution_init(1.0 / mean_arrive_time_us); + + printf("Initializing load dispatcher...\n"); + dispatcher = dispatcher_create(db); + + __nsec start = now_ns(); + for (i = 0; i < 10000; i++) random_exponential_distribution(); + printf("Benchmarking random generator: %.3f ns\n", (double)(now_ns() - start) / 10000); + + printf("Generating requests...\n"); + do_dispatching(dispatcher); + + printf("Results: \n"); + result = get_result(dispatcher->issued, dispatcher->requests, FLAGS_run_time); + output = fopen((FLAGS_output_path + "/rocksdb_" + std::to_string(time(NULL))).c_str(), "w"); + assert(output != NULL); + output_result(result, output); + print_result(result); + + fclose(output); + dispatcher_destroy(dispatcher); + rocksdb_close(db); + printf("Experiment exits gracefully.\n"); +} + +int main(int argc, char **argv) +{ + gflags::SetUsageMessage("test_rocksdb [options]"); + gflags::ParseCommandLineFlags(&argc, &argv, true); + 
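+    /* One CPU is taken by the dispatcher, so num_workers + 1 must fit within
+     * USED_CPUS (checked right below). */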
gflags::ShutDownCommandLineFlags(); + + if (FLAGS_num_workers + 1 > USED_CPUS) { + printf("Too many workers %d + 1 > %d\n", FLAGS_num_workers, USED_CPUS); + return 0; + } + + sl_libos_start(experiment_main, NULL); +} diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt new file mode 100644 index 0000000..c0a1085 --- /dev/null +++ b/utils/CMakeLists.txt @@ -0,0 +1,5 @@ +add_definitions(-D_GNU_SOURCE) + +aux_source_directory(. UTILS_SRCS) +add_library(utils ${UTILS_SRCS}) +target_include_directories(utils PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) diff --git a/utils/bitmap.c b/utils/bitmap.c new file mode 100644 index 0000000..d81aca2 --- /dev/null +++ b/utils/bitmap.c @@ -0,0 +1,52 @@ +/* + * bitmap.c - a library for bit array manipulation + */ + +#include +#include + +static __always_inline int +bitmap_find_next(unsigned long *bits, int nbits, int pos, bool invert) +{ + unsigned long val, mask = ~((1UL << BITMAP_POS_SHIFT(pos)) - 1); + int idx; + + for (idx = align_down(pos, BITS_PER_LONG); + idx < nbits; idx += BITS_PER_LONG) { + val = bits[BITMAP_POS_IDX(idx)]; + if (invert) + val = ~val; + val &= mask; + if (val) + return MIN(idx + __builtin_ffsl(val) - 1, nbits); + mask = ~0UL; + } + + return nbits; +} + +/** + * bitmap_find_next_cleared - finds the next cleared bit + * @bits: the bitmap + * @nbits: the number of total bits + * @pos: the starting bit + * + * Returns the bit index of the next zero bit, or the total size if none exists. + */ +int bitmap_find_next_cleared(unsigned long *bits, int nbits, int pos) +{ + return bitmap_find_next(bits, nbits, pos, true); +} + +/** + * bitmap_find_next_set - finds the next set bit + * @bits: the bitmap + * @nbits: the number of total bits + * @pos: the starting bit + * + * Returns the bit index of the next zero bit, or the total size if none exists. 
+ */ +int bitmap_find_next_set(unsigned long *bits, int nbits, int pos) +{ + return bitmap_find_next(bits, nbits, pos, false); +} diff --git a/utils/include/utils/assert.h b/utils/include/utils/assert.h new file mode 100644 index 0000000..3bf0cb9 --- /dev/null +++ b/utils/include/utils/assert.h @@ -0,0 +1,130 @@ +/* + * assert.h - support for assertions + */ + +#pragma once + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +extern void logk_bug(bool fatal, const char *expr, const char *file, int line, const char *func); + +/* this helper trys to check a run-time assertion at built-time if possible */ +#if !defined(__CHECKER__) && !defined(__cplusplus) +#define __build_assert_if_constant(cond) \ + _Static_assert(__builtin_choose_expr(__builtin_constant_p(cond), (cond), true), \ + "run-time assertion caught at build-time") +#else /* __CHECKER__ */ +#define __build_assert_if_constant(cond) +#endif /* __CHECKER__ */ + +#undef assert +/* these assertions will get compiled out in release builds (fails on false) */ +#if DEBUG +#define assert(cond) \ + do { \ + __build_assert_if_constant(cond); \ + if (unlikely(!(cond))) { \ + logk_bug(true, __cstr(cond), __FILE__, __LINE__, __func__); \ + __builtin_unreachable(); \ + } \ + } while (0) +#else /* DEBUG */ +#define assert(cond) \ + do { \ + __build_assert_if_constant(cond); \ + (void)sizeof(cond); \ + } while (0) +#endif /* DEBUG */ + +/** + * BUG - a fatal code-path that doesn't compile out in release builds + */ +#define BUG() \ + do { \ + logk_bug(true, "false", __FILE__, __LINE__, __func__); \ + __builtin_unreachable(); \ + } while (0) + +/** + * BUG_ON - a fatal check that doesn't compile out in release builds + * @condition: the condition to check (fails on true) + */ +#define BUG_ON(cond) \ + do { \ + __build_assert_if_constant(!(cond)); \ + if (unlikely(cond)) { \ + logk_bug(true, __cstr(cond), __FILE__, __LINE__, __func__); \ + __builtin_unreachable(); \ + } \ + } while (0) + +/** + * WARN - a non-fatal code-path that doesn't compile out in release builds + */ +#define WARN() logk_bug(false, "false", __FILE__, __LINE__, __func__); + +/** + * WARN_ON - a non-fatal check that doesn't compile out in release builds + * @condition: the condition to check (fails on true) + */ +#define WARN_ON(cond) \ + ({ \ + __build_assert_if_constant(!(cond)); \ + bool __result = !!(cond); \ + if (unlikely(__result)) \ + logk_bug(false, __cstr(cond), __FILE__, __LINE__, __func__); \ + __result; \ + }) + +/** + * WARN_ON_ONCE - a non-fatal check that doesn't compile out in release builds + * @condition: the condition to check (fails on true) + */ +#define WARN_ON_ONCE(cond) \ + ({ \ + static bool __once; \ + bool __result = !!(cond); \ + __build_assert_if_constant(!(cond)); \ + if (unlikely(!__once && __result)) { \ + __once = true; \ + logk_bug(false, __cstr(cond), __FILE__, __LINE__, __func__); \ + } \ + __result; \ + }) + +/** + * BUILD_ASSERT - assert a build-time condition. + * @cond: the compile-time condition which must be true. + * + * Your compile will fail if the condition isn't true, or can't be evaluated + * by the compiler. + */ +#if !defined(__CHECKER__) && !defined(__cplusplus) +#define BUILD_ASSERT(cond) _Static_assert(cond, "build-time condition failed") +#else /* __CHECKER__ */ +#define BUILD_ASSERT(cond) +#endif /* __CHECKER__ */ + +/** + * BUILD_ASSERT_MSG - assert a build-time condition, printing a custom failure + * message. + * @cond: the compile-time condition which must be true. 
+ * @msg: the message to print on failure. + * + * Your compile will fail if the condition isn't true, or can't be evaluated + * by the compiler. + */ +#if !defined(__CHECKER__) && !defined(__cplusplus) +#define BUILD_ASSERT_MSG(cond, msg) _Static_assert(cond, msg) +#else /* __CHECKER__ */ +#define BUILD_ASSERT_MSG(cond, msg) +#endif /* __CHECKER__ */ + +#ifdef __cplusplus +} +#endif diff --git a/utils/include/utils/atomic.h b/utils/include/utils/atomic.h new file mode 100644 index 0000000..8e0c139 --- /dev/null +++ b/utils/include/utils/atomic.h @@ -0,0 +1,18 @@ +/* + * atomic.h - utilities for atomic memory ops + * + * With the exception of *_read and *_write, consider these operations full + * barriers. + */ + +#pragma once + +#include + +#define atomic_load_relax(ptr) __atomic_load_n(ptr, __ATOMIC_RELAXED) +#define atomic_load_acq(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE) +#define atomic_load_con(ptr) __atomic_load_n(ptr, __ATOMIC_CONSUME) +#define atomic_store_rel(ptr, val) __atomic_store_n(ptr, val, __ATOMIC_RELEASE) +#define atomic_inc(ptr) atomic_fetch_add(ptr, 1) +#define atomic_dec(ptr) atomic_fetch_sub(ptr, 1) +#define atomic_dec_zero(ptr) (atomic_dec(ptr) == 1) diff --git a/utils/include/utils/bitmap.h b/utils/include/utils/bitmap.h new file mode 100644 index 0000000..2417c69 --- /dev/null +++ b/utils/include/utils/bitmap.h @@ -0,0 +1,127 @@ +/* + * bitmap.h - a library for bit array manipulation + */ + +#pragma once + +#include +#include + +#include +#include + +#define BITS_PER_LONG (sizeof(long) * 8) +#define BITMAP_LONG_SIZE(nbits) div_up(nbits, BITS_PER_LONG) + +#define DEFINE_BITMAP(name, nbits) unsigned long name[BITMAP_LONG_SIZE(nbits)] + +typedef unsigned long *bitmap_ptr; + +#define BITMAP_POS_IDX(pos) ((pos) / BITS_PER_LONG) +#define BITMAP_POS_SHIFT(pos) ((pos) % BITS_PER_LONG) + +/** + * bitmap_set - sets a bit in the bitmap + * @bits: the bitmap + * @pos: the bit number + */ +static inline void bitmap_set(unsigned long *bits, int pos) +{ + bits[BITMAP_POS_IDX(pos)] |= (1ul << BITMAP_POS_SHIFT(pos)); +} + +/** + * bitmap_clear - clears a bit in the bitmap + * @bits: the bitmap + * @pos: the bit number + */ +static inline void bitmap_clear(unsigned long *bits, int pos) +{ + bits[BITMAP_POS_IDX(pos)] &= ~(1ul << BITMAP_POS_SHIFT(pos)); +} + +/** + * bitmap_test - tests if a bit is set in the bitmap + * @bits: the bitmap + * @pos: the bit number + * + * Returns true if the bit is set, otherwise false. 
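+ *
+ * Example (assumed usage; bitmap_init is defined further below in this header):
+ *   DEFINE_BITMAP(mask, 64);
+ *   bitmap_init(mask, 64, false);
+ *   bitmap_set(mask, 3);
+ *   assert(bitmap_test(mask, 3));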
+ */ +static inline bool bitmap_test(unsigned long *bits, int pos) +{ + return (bits[BITMAP_POS_IDX(pos)] & (1ul << BITMAP_POS_SHIFT(pos))) != 0; +} + +/** + * bitmap_atomic_set - atomically sets a bit in the bitmap + * @bits: the bitmap + * @pos: the bit number + */ +static inline void bitmap_atomic_set(unsigned long *bits, int pos) +{ + atomic_fetch_or(&bits[BITMAP_POS_IDX(pos)], (1ul << BITMAP_POS_SHIFT(pos))); +} + +/** + * bitmap_atomic_test_and_set - atomically tests and sets a bit in the bitmap + * @bits: the bitmap + * @pos; the bit number + */ +static inline bool bitmap_atomic_test_and_set(unsigned long *bits, int pos) +{ + unsigned long bit = (1ul << BITMAP_POS_SHIFT(pos)); + return (atomic_fetch_or(&bits[BITMAP_POS_IDX(pos)], bit) & bit) != 0; +} + +/** + * bitmap_atomic_clear - atomically clears a bit in the bitmap + * @bits: the bitmap + * @pos: the bit number + */ +static inline void bitmap_atomic_clear(unsigned long *bits, int pos) +{ + atomic_fetch_and(&bits[BITMAP_POS_IDX(pos)], ~(1ul << BITMAP_POS_SHIFT(pos))); +} + +/** + * bitmap_atomic_test - atomically tests a bit in the bitmap + * @bits: the bitmap + * @pos: the bit number + */ +static inline bool bitmap_atomic_test(unsigned long *bits, int pos) +{ + return (atomic_load_acq(&bits[BITMAP_POS_IDX(pos)]) & (1ul << BITMAP_POS_SHIFT(pos))) != 0; +} + +/** + * bitmap_init - initializes a bitmap + * @bits: the bitmap + * @nbits: the number of total bits + * @state: if true, all bits are set, otherwise all bits are cleared + */ +static inline void bitmap_init(unsigned long *bits, int nbits, bool state) +{ + memset(bits, state ? 0xff : 0, BITMAP_LONG_SIZE(nbits) * sizeof(long)); +} + +extern int bitmap_find_next_set(unsigned long *bits, int nbits, int pos); +extern int bitmap_find_next_cleared(unsigned long *bits, int nbits, int pos); + +/** + * bitmap_for_each_set - generates a loop iteration over each set bit + * @bits: the bitmap + * @nbits: the number of total bits + * @pos: the bit position (int) + */ +#define bitmap_for_each_set(bits, nbits, pos) \ + for ((pos) = -1; (pos) = bitmap_find_next_set((bits), (nbits), ((pos) + 1)), (pos) < (nbits);) + +/** + * bitmap_for_each_cleared - generates a loop iteration over each cleared bit + * @bits: the bitmap + * @nbits: the number of total bits + * @pos: the bit position (int) + */ +#define bitmap_for_each_cleared(bits, nbits, pos) \ + for ((pos) = -1; \ + (pos) = bitmap_find_next_cleared((bits), (nbits), ((pos) + 1)), (pos) < (nbits);) diff --git a/utils/include/utils/byteorder.h b/utils/include/utils/byteorder.h new file mode 100644 index 0000000..1f86a8e --- /dev/null +++ b/utils/include/utils/byteorder.h @@ -0,0 +1,93 @@ +/* + * byteorder.h - utilties for swapping bytes and converting endianness + */ + +#pragma once + +#include + +#include + +static inline uint16_t __bswap16(uint16_t val) +{ +#ifdef HAS_BUILTIN_BSWAP + return __builtin_bswap16(val); +#else + return (((val & 0x00ffU) << 8) | + ((val & 0xff00U) >> 8)); +#endif +} + +static inline uint32_t __bswap32(uint32_t val) +{ +#ifdef HAS_BUILTIN_BSWAP + return __builtin_bswap32(val); +#else + return (((val & 0x000000ffUL) << 24) | + ((val & 0x0000ff00UL) << 8) | + ((val & 0x00ff0000UL) >> 8) | + ((val & 0xff000000UL) >> 24)); +#endif +} + +static inline uint64_t __bswap64(uint64_t val) +{ +#ifdef HAS_BUILTIN_BSWAP + return __builtin_bswap64(val); +#else + return (((val & 0x00000000000000ffULL) << 56) | + ((val & 0x000000000000ff00ULL) << 40) | + ((val & 0x0000000000ff0000ULL) << 24) | + ((val & 0x00000000ff000000ULL) << 
8) | + ((val & 0x000000ff00000000ULL) >> 8) | + ((val & 0x0000ff0000000000ULL) >> 24) | + ((val & 0x00ff000000000000ULL) >> 40) | + ((val & 0xff00000000000000ULL) >> 56)); +#endif +} + +#ifndef __BYTE_ORDER +#error __BYTE_ORDER is undefined +#endif + +#if __BYTE_ORDER == __LITTLE_ENDIAN + +#define cpu_to_le16(x) (x) +#define cpu_to_le32(x) (x) +#define cpu_to_le64(x) (x) +#define cpu_to_be16(x) (__bswap16(x)) +#define cpu_to_be32(x) (__bswap32(x)) +#define cpu_to_be64(x) (__bswap64(x)) + +#define le16_to_cpu(x) (x) +#define le32_to_cpu(x) (x) +#define le64_to_cpu(x) (x) +#define be16_to_cpu(x) (__bswap16(x)) +#define be32_to_cpu(x) (__bswap32(x)) +#define be64_to_cpu(x) (__bswap64(x)) + +#else /* __BYTE_ORDER == __LITLE_ENDIAN */ + +#define cpu_to_le16(x) (__bswap16(x)) +#define cpu_to_le32(x) (__bswap32(x)) +#define cpu_to_le64(x) (__bswap64(x)) +#define cpu_to_be16(x) (x) +#define cpu_to_be32(x) (x) +#define cpu_to_be64(x) (x) + +#define le16_to_cpu(x) (__bswap16(x)) +#define le32_to_cpu(x) (__bswap32(x)) +#define le64_to_cpu(x) (__bswap64(x)) +#define be16_to_cpu(x) (x) +#define be32_to_cpu(x) (x) +#define be64_to_cpu(x) (x) + +#endif /* __BYTE_ORDER == __LITTLE_ENDIAN */ + +#define ntoh16(x) (be16_to_cpu(x)) +#define ntoh32(x) (be32_to_cpu(x)) +#define ntoh64(x) (be64_to_cpu(x)) + +#define hton16(x) (cpu_to_be16(x)) +#define hton32(x) (cpu_to_be32(x)) +#define hton64(x) (cpu_to_be64(x)) diff --git a/utils/include/utils/cksum.h b/utils/include/utils/cksum.h new file mode 100644 index 0000000..f5bb26a --- /dev/null +++ b/utils/include/utils/cksum.h @@ -0,0 +1,75 @@ +/* + * cksum.h - utilities for calculating checksums + */ + +#pragma once + +#include + +/** + * cksum_internet - performs an internet checksum on a buffer + * @buf: the buffer + * @len: the length in bytes + * + * An internet checksum is a 16-bit one's complement sum. Details + * are described in RFC 1071. + * + * Returns a 16-bit checksum value. 
+ */ +static inline uint16_t cksum_internet(const void *buf, int len) +{ + uint64_t sum; + + asm volatile("xorq %0, %0\n" + + /* process 8 byte chunks */ + "movl %2, %%edx\n" + "shrl $3, %%edx\n" + "cmp $0, %%edx\n" + "jz 2f\n" + "1: adcq (%1), %0\n" + "leaq 8(%1), %1\n" + "decl %%edx\n" + "jne 1b\n" + "adcq $0, %0\n" + + /* process 4 byte (if left) */ + "2: test $4, %2\n" + "je 3f\n" + "movl (%1), %%edx\n" + "addq %%rdx, %0\n" + "adcq $0, %0\n" + "leaq 4(%1), %1\n" + + /* process 2 byte (if left) */ + "3: test $2, %2\n" + "je 4f\n" + "movzxw (%1), %%rdx\n" + "addq %%rdx, %0\n" + "adcq $0, %0\n" + "leaq 2(%1), %1\n" + + /* process 1 byte (if left) */ + "4: test $1, %2\n" + "je 5f\n" + "movzxb (%1), %%rdx\n" + "addq %%rdx, %0\n" + "adcq $0, %0\n" + + /* fold into 16-bit answer */ + "5: movq %0, %1\n" + "shrq $32, %0\n" + "addl %k1, %k0\n" + "adcl $0, %k0\n" + "movq %0, %1\n" + "shrl $16, %k0\n" + "addw %w1, %w0\n" + "adcw $0, %w0\n" + "not %0\n" + + : "=&r"(sum), "=r"(buf) + : "r"(len), "1"(buf) : "%rdx", "cc", "memory"); + + return (uint16_t)sum; +} + diff --git a/utils/include/utils/cpu.h b/utils/include/utils/cpu.h new file mode 100644 index 0000000..c7c50a1 --- /dev/null +++ b/utils/include/utils/cpu.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +static inline int bind_to_cpu(int cpu_id) +{ + cpu_set_t cpuset; + pid_t tid = _gettid(); + + CPU_ZERO(&cpuset); + CPU_SET(cpu_id, &cpuset); + return sched_setaffinity(tid, sizeof(cpu_set_t), &cpuset); +} diff --git a/utils/include/utils/defs.h b/utils/include/utils/defs.h new file mode 100644 index 0000000..a6c8b92 --- /dev/null +++ b/utils/include/utils/defs.h @@ -0,0 +1,350 @@ +/* + * stddef.h - standard definitions + */ + +#pragma once + +#include +#include +#include +#include + +/* + * NOTE: Some code in this file is derived from the public domain CCAN project. + * http://ccodearchive.net/ + */ + +#define CACHE_LINE_SIZE 64 +#define RSP_ALIGNMENT 16 + +#define check_type(expr, type) ((typeof(expr) *)0 != (type *)0) +#define check_types_match(expr1, expr2) ((typeof(expr1) *)0 != (typeof(expr2) *)0) + +/** + * container_of - get pointer to enclosing structure + * @member_ptr: pointer to the structure member + * @containing_type: the type this member is within + * @member: the name of this member within the structure. + * + * Given a pointer to a member of a structure, this macro does pointer + * subtraction to return the pointer to the enclosing type. + */ +#ifndef container_of +#define container_of(member_ptr, containing_type, member) \ + ((containing_type *)((char *)(member_ptr)-offsetof(containing_type, member)) + \ + check_types_match(*(member_ptr), ((containing_type *)0)->member)) +#endif + +/** + * container_of_var - get pointer to enclosing structure using a variable + * @member_ptr: pointer to the structure member + * @container_var: a pointer of same type as this member's container + * @member: the name of this member within the structure. + */ +#define container_of_var(member_ptr, container_var, member) \ + container_of(member_ptr, typeof(*container_var), member) + +/** + * ARRAY_SIZE - get the number of elements in a visible array + * @arr: the array whose size you want. + */ +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) + +/** + * MAX - picks the maximum of two expressions + * + * Arguments @a and @b are evaluated exactly once + */ +#define MAX(a, b) \ + ({ \ + typeof(a) _a = (a); \ + typeof(b) _b = (b); \ + _a > _b ? 
_a : _b; \ + }) + +/** + * MIN - picks the minimum of two expressions + * + * Arguments @a and @b are evaluated exactly once + */ +#define MIN(a, b) \ + ({ \ + typeof(a) _a = (a); \ + typeof(b) _b = (b); \ + _a < _b ? _a : _b; \ + }) + +/** + * is_power_of_two - determines if an integer is a power of two + * @x: the value + * + * Returns true if the integer is a power of two. + */ +#define is_power_of_two(x) ((x) != 0 && !((x) & ((x)-1))) + +/** + * align_up - rounds a value up to an alignment + * @x: the value + * @align: the alignment (must be power of 2) + * + * Returns an aligned value. + */ +#define align_up(x, align) \ + ({ \ + assert(is_power_of_two(align)); \ + (((x)-1) | ((typeof(x))(align)-1)) + 1; \ + }) + +/** + * align_down - rounds a value down to an alignment + * @x: the value + * @align: the alignment (must be power of 2) + * + * Returns an aligned value. + */ +#define align_down(x, align) \ + ({ \ + assert(is_power_of_two(align)); \ + ((x) & ~((typeof(x))(align)-1)); \ + }) + +/** + * is_aligned - determines if a value is aligned + * @x: the value + * @align: the alignment (must be power of 2) + * + * Returns true if the value is aligned. + */ +#define is_aligned(x, align) (((x) & ((typeof(x))(align)-1)) == 0) + +/** + * div_up - divides two numbers, rounding up to an integer + * @x: the dividend + * @d: the divisor + * + * Returns a rounded-up quotient. + */ +#define div_up(x, d) ((((x) + (d)-1)) / (d)) + +/** + * Define an array of per-cpu variables, make the array size a multiple of + * cacheline. + */ +#define declear_cpu_array(type, ident, num) \ + extern type ident[((sizeof(type) * num + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)) / \ + sizeof(type)] + +#define define_cpu_array(type, ident, num) \ + type ident[((sizeof(type) * num + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)) / \ + sizeof(type)]; \ + _Static_assert(sizeof(ident) % CACHE_LINE_SIZE == 0) + +/** + * __cstr - converts a value to a string + */ +#define __cstr_t(x...) #x +#define __cstr(x...) __cstr_t(x) + +/** + * BIT - generates a value with one set bit by index + * @n: the bit index to set + * + * Returns a long-sized constant. + */ +#define BIT(n) (1UL << (n)) + +/* common sizes */ +#define KB (1024) +#define MB (1024 * KB) +#define GB (1024 * MB) + +/** + * wraps_lt - a < b ? + * + * This comparison is safe against unsigned wrap around. + */ +static inline bool wraps_lt(uint32_t a, uint32_t b) +{ + return (int32_t)(a - b) < 0; +} + +/** + * wraps_lte - a <= b ? + * + * This comparison is safe against unsigned wrap around. + */ +static inline bool wraps_lte(uint32_t a, uint32_t b) +{ + return (int32_t)(a - b) <= 0; +} + +/** + * wraps_gt - a > b ? + * + * This comparison is safe against unsigned wrap around. + */ +static inline bool wraps_gt(uint32_t a, uint32_t b) +{ + return (int32_t)(b - a) < 0; +} + +/** + * wraps_gte - a >= b ? + * + * This comparison is safe against unsigned wrap around. 
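+ *
+ * For example, wraps_gte(1, 0xfffffffe) is true: 1 is a few counts "after"
+ * 0xfffffffe once the 32-bit counter wraps around.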
+ */ +static inline bool wraps_gte(uint32_t a, uint32_t b) +{ + return (int32_t)(b - a) <= 0; +} + +enum { + PGSHIFT_4KB = 12, + PGSHIFT_2MB = 21, + PGSHIFT_1GB = 30, +}; + +enum { + PGSIZE_4KB = (1 << PGSHIFT_4KB), /* 4096 bytes */ + PGSIZE_2MB = (1 << PGSHIFT_2MB), /* 2097152 bytes */ + PGSIZE_1GB = (1 << PGSHIFT_1GB), /* 1073741824 bytes */ +}; + +#define PGMASK_4KB (PGSIZE_4KB - 1) +#define PGMASK_2MB (PGSIZE_2MB - 1) +#define PGMASK_1GB (PGSIZE_1GB - 1) + +/* page numbers */ +#define PGN_4KB(la) (((uintptr_t)(la)) >> PGSHIFT_4KB) +#define PGN_2MB(la) (((uintptr_t)(la)) >> PGSHIFT_2MB) +#define PGN_1GB(la) (((uintptr_t)(la)) >> PGSHIFT_1GB) + +#define PGOFF_4KB(la) (((uintptr_t)(la)) & PGMASK_4KB) +#define PGOFF_2MB(la) (((uintptr_t)(la)) & PGMASK_2MB) +#define PGOFF_1GB(la) (((uintptr_t)(la)) & PGMASK_1GB) + +#define PGADDR_4KB(la) (((uintptr_t)(la)) & ~((uintptr_t)PGMASK_4KB)) +#define PGADDR_2MB(la) (((uintptr_t)(la)) & ~((uintptr_t)PGMASK_2MB)) +#define PGADDR_1GB(la) (((uintptr_t)(la)) & ~((uintptr_t)PGMASK_1GB)) + +#ifndef MAP_FAILED +#define MAP_FAILED ((void *)-1) +#endif + +#ifndef likely +#define likely(x) __builtin_expect(!!(x), 1) +#endif +#ifndef unlikely +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif +#define unreachable() __builtin_unreachable() + +#define prefetch0(x) __builtin_prefetch((x), 0, 3) +#define prefetch1(x) __builtin_prefetch((x), 0, 2) +#define prefetch2(x) __builtin_prefetch((x), 0, 1) +#define prefetchnta(x) __builtin_prefetch((x), 0, 0) +#define prefetch(x) prefetch0(x) + +/* variable attributes */ +#define __packed __attribute__((packed)) +#define __notused __attribute__((unused)) +#define __used __attribute__((used)) +#define __aligned(x) __attribute__((aligned(x))) +#define __aligned_cacheline __aligned(CACHE_LINE_SIZE) + +/* function attributes */ +#define __api +#define __noinline __attribute__((noinline)) +#define __noreturn __attribute__((noreturn)) +#define __must_use_return __attribute__((warn_unused_result)) +#define __pure __attribute__((pure)) +#define __weak __attribute__((weak)) +#define __malloc __attribute__((malloc)) +#define __assume_aligned(x) __attribute__((assume_aligned(x))) + +#define barrier() asm volatile("" ::: "memory") + +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) + +static __always_inline void __write_once_size(volatile void *p, void *res, int size) +{ + switch (size) { + case 1: + *(volatile uint8_t *)p = *(uint8_t *)res; + break; + case 2: + *(volatile uint16_t *)p = *(uint16_t *)res; + break; + case 4: + *(volatile uint32_t *)p = *(uint32_t *)res; + break; + case 8: + *(volatile uint64_t *)p = *(uint64_t *)res; + break; + default: + barrier(); + __builtin_memcpy((void *)p, (const void *)res, size); + barrier(); + } +} + +#define WRITE_ONCE(x, val) \ + ({ \ + union { \ + typeof(x) __val; \ + char __c[1]; \ + } __u = {.__val = (__force typeof(x))(val)}; \ + __write_once_size(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ + }) + +/* + * These attributes are defined only with the sparse checker tool. 
+ */ +#ifdef __CHECKER__ +#define __rcu __attribute__((noderef, address_space(1))) +#define __percpu __attribute__((noderef, address_space(2))) +#define __force __attribute__((force)) +#undef __assume_aligned +#define __assume_aligned(x) +#else /* __CHECKER__ */ +#define __rcu +#define __percpu +#define __force +#endif /* __CHECKER__ */ + +/* + * Endianness + */ + +#define __LITTLE_ENDIAN 1234 +#define __BIG_ENDIAN 4321 + +#define __BYTE_ORDER __LITTLE_ENDIAN + +/* + * Word Size + */ + +#define __32BIT_WORDS 32 +#define __64BIT_WORDS 64 + +#define __WORD_SIZE __64BIT_WORDS + +#define CACHE_LINE_SIZE 64 + +/* multiply u64 with u32 */ +static inline uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift) +{ + uint32_t ah, al; + uint64_t ret; + + al = a; + ah = a >> 32; + + ret = ((uint64_t)al * mul) >> shift; + if (ah) + ret += ((uint64_t)ah * mul) << (32 - shift); + + return ret; +} diff --git a/utils/include/utils/gen.h b/utils/include/utils/gen.h new file mode 100644 index 0000000..70ce572 --- /dev/null +++ b/utils/include/utils/gen.h @@ -0,0 +1,62 @@ +/* + * gen.h - shared generation numbers + */ + +#pragma once + +#include + +#include + +/* describes a generation number */ +struct gen_num { + uint32_t prev_gen; + volatile uint32_t *gen; +}; + +/* + * gen_active - used by a writer to indicate that a generation is ongoing + */ +static inline void gen_active(struct gen_num *g) +{ + if (*g->gen == 0) + *g->gen = g->prev_gen + 1; +} + +/* + * gen_inactive - used by a writer to indicate that we are between generations + */ +static inline void gen_inactive(struct gen_num *g) +{ + if (*g->gen != 0) { + g->prev_gen = *g->gen; + *g->gen = 0; + } +} + +/* + * gen_in_same_gen - used by a reader to determine if we are in the same + * generation as last time we checked + * + * Returns true if we are in the same generation as last time, false if we are + * in a different generation or are between generations. + */ +static inline bool gen_in_same_gen(struct gen_num *g) +{ + uint32_t current_gen = *g->gen; + bool unchanged; + + unchanged = (current_gen != 0) && (current_gen == g->prev_gen); + g->prev_gen = current_gen; + + return unchanged; +} + +/* + * gen_init - initialize a shared generation number + */ +static inline void gen_init(struct gen_num *g, uint32_t *gen) +{ + g->prev_gen = 0; + g->gen = gen; +} diff --git a/utils/include/utils/hash.h b/utils/include/utils/hash.h new file mode 100644 index 0000000..f5c2e61 --- /dev/null +++ b/utils/include/utils/hash.h @@ -0,0 +1,154 @@ +/* + * hash.h - hash functions for use with hash tables + * + * NOTE: We focus our efforts on hash functions for small input values, + * as the intention is to use these for network protocol addresses + * (e.g. IP source, IP destionation, source port, destination port, + * etc.) + * + * Jenkins hash is provided for arbitrary length inputs. + */ + +#pragma once + +#include +#include +#include + +static inline uint64_t __mm_crc32_u64(uint64_t crc, uint64_t val) +{ + asm("crc32q %1, %0" : "+r"(crc) : "rm"(val)); + return crc; +} + +/** + * hash_crc32c_one - hashes one 64-bit word + * @seed: useful for creating multiple hash functions + * @val: the word to hash + * + * Returns a 32-bit hash value. 
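+ *
+ * Example (sketch; the flow fields and table_size are hypothetical, and
+ * table_size is assumed to be a power of two):
+ *   uint64_t key = ((uint64_t)flow->src_ip << 32) | flow->dst_ip;
+ *   uint32_t bucket = hash_crc32c_one(0, key) & (table_size - 1);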
+ */ +static inline uint32_t hash_crc32c_one(uint32_t seed, uint64_t val) +{ + return __mm_crc32_u64(seed, val); +} + +/** + * hash_crc32c_two - hashes two 64-bit words + * @seed: useful for creating multiple hash functions + * @a: the first word to hash + * @b: the second word to hash + * + * Returns a 32-bit hash value. + */ +static inline uint32_t hash_crc32c_two(uint32_t seed, uint64_t a, uint64_t b) +{ + seed = __mm_crc32_u64(seed, a); + return __mm_crc32_u64(seed, b); +} + +/* + * These functions are a simplified subset of CityHash, modified to focus + * just on small input values. They are based on Google's original CityHash + * implementation, and are intended to be functionally equivalent. + * + * Copyright (c) 2011 Google, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * CityHash was created by Geoff Pike and Jyrki Alakuijala + */ + +#define HASH_CITY_K2 0x9ae16a3b2f90404fULL + +static inline uint64_t __hash_city_len16(uint64_t u, uint64_t v, uint64_t mul) +{ + uint64_t a, b; + a = (u ^ v) * mul; + a ^= (a >> 47); + b = (v ^ a) * mul; + b ^= (b >> 47); + b *= mul; + return b; +} + +static inline uint64_t __hash_city_rotate(uint64_t val, int shift) +{ + return ((val >> shift) | (val << (64 - shift))); +} + +/** + * hash_city_one - hashes one 64-bit word + * @val: the word to hash + * + * This uses Google's CityHash algorithm. + * + * Returns a 64-bit hash value. + */ +static inline uint64_t hash_city_one(uint64_t val) +{ + uint64_t mul = HASH_CITY_K2 + 16; + uint64_t a = val + HASH_CITY_K2; + uint64_t b = val; + uint64_t c = __hash_city_rotate(b, 37) * mul + a; + uint64_t d = (__hash_city_rotate(a, 25) + b) * mul; + return __hash_city_len16(c, d, mul); +} + +/** + * hash_city_one - hashes one 64-bit word + * @val_a: the first word to hash + * @val_b: the second word to hash + * + * This uses Google's CityHash algorithm. + * + * Returns a 64-bit hash value. + */ +static inline uint64_t hash_city_two(uint64_t val_a, uint64_t val_b) +{ + uint64_t mul = HASH_CITY_K2 + 32; + uint64_t a = val_a + HASH_CITY_K2; + uint64_t b = val_b; + uint64_t c = __hash_city_rotate(b, 37) * mul + a; + uint64_t d = (__hash_city_rotate(a, 25) + b) * mul; + return __hash_city_len16(c, d, mul); +} + +extern uint32_t jenkins_hash(const void *key, size_t length); + +/** + * rand_crc32c - generates a very fast pseudorandom value using crc32c + * @seed: a seed-value for the hash + * + * WARNING: not a cryptographic hash. 
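+ *
+ * Example (sketch; nr_workers is hypothetical and assumed to be a power of
+ * two): pick a pseudorandom index in [0, nr_workers):
+ *   int idx = rand_crc32c(seed) & (nr_workers - 1);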
+ */ +static inline uint64_t rand_crc32c(uint32_t seed) +{ + return hash_crc32c_one(seed, now_tsc()); +} + +/** + * rand_city - generates a very fast pseudorandom value using city hash + * + * WARNING: not a cryptographic hash. + */ +static inline uint64_t rand_city(void) +{ + return hash_city_one(now_tsc()); +} diff --git a/utils/include/utils/init.h b/utils/include/utils/init.h new file mode 100644 index 0000000..6bfa5ef --- /dev/null +++ b/utils/include/utils/init.h @@ -0,0 +1,30 @@ +/* + * bitmap.h - a library for bit array manipulation + */ + +#include + +struct init_entry { + const char *name; + int (*init)(void); +}; + +#define INITIALIZER(name, suffix) \ + { \ + __cstr(name), &name##_##suffix \ + } + +static inline int run_init_handlers(const struct init_entry *h, int n) +{ + int i, ret; + + for (i = 0; i < n; i++) { + ret = h[i].init(); + if (ret) { + log_err("\t%s init failed, ret = %d", h[i].name, ret); + return ret; + } + } + + return 0; +} \ No newline at end of file diff --git a/utils/include/utils/kref.h b/utils/include/utils/kref.h new file mode 100644 index 0000000..674bb8c --- /dev/null +++ b/utils/include/utils/kref.h @@ -0,0 +1,72 @@ +/* + * kref.h - generic support for reference counts + * + * This implementation is inspired by the following paper: + * Kroah-Hartman, Greg, kobjects and krefs. Linux Symposium 2004 + */ + +#pragma once + +#include +#include + +struct kref { + atomic_int cnt; +}; + +/** + * kref_init - initializes the reference count to one + * @ref: the kref + */ +static inline void kref_init(struct kref *ref) +{ + atomic_store_rel(&ref->cnt, 1); +} + +/** + * kref_initn - initializes the reference count to @n + * @ref: the kref + * @n: the initial reference count + */ +static inline void kref_initn(struct kref *ref, int n) +{ + atomic_store_rel(&ref->cnt, n); +} + +/** + * kref_get - atomically increments the reference count + * @ref: the kref + */ +static inline void kref_get(struct kref *ref) +{ + assert(atomic_load_acq(&ref->cnt) > 0); + atomic_inc(&ref->cnt); +} + +/** + * kref_put - atomically decrements the reference count, releasing the object + * when it reaches zero + * @ref: the kref + * @release: a pointer to the release function + */ +static inline void kref_put(struct kref *ref, void (*release)(struct kref *ref)) +{ + assert(release); + if (atomic_dec_zero(&ref->cnt)) + release(ref); +} + +/** + * kref_released - has this kref been released? + * @ref: the kref + * + * WARNING: this is unsafe without additional synchronization. For example, use + * this function while holding a lock that prevents the release() function from + * removing the object from the data structure you are accessing. + * + * Returns true if the reference count has dropped to zero. + */ +static inline bool kref_released(struct kref *ref) +{ + return atomic_load_acq(&ref->cnt) == 0; +} diff --git a/utils/include/utils/list.h b/utils/include/utils/list.h new file mode 100644 index 0000000..a0486e2 --- /dev/null +++ b/utils/include/utils/list.h @@ -0,0 +1,736 @@ +/* + * list.h - The CCAN linked list library + * + * see http://ccodearchive.net/info/list.html + * Licensed under BSD-MIT + * + * Two other types of lists were also added to better support hash tables + * - "slist" is a singly linked list + * - "hlist" is a doubly linked list with a single head pointer to save space. + * + * hlist's can only be traversed in the forward direction. However, unlike + * slist's, they support constant time removals. 
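+ *
+ * Example (sketch; the bucket array and connection fields are hypothetical):
+ * hlist heads work well as hash-table buckets, since an entry can be
+ * unlinked in O(1) without knowing which bucket it lives in:
+ *   struct hlist_head buckets[NR_BUCKETS];
+ *   hlist_add_head(&buckets[hash & (NR_BUCKETS - 1)], &conn->link);
+ *   ...
+ *   hlist_del(&conn->link);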
+ */ + +#pragma once + +#include +#include + +#include +#include + +/** + * struct list_node - an entry in a doubly-linked list + * @next: next entry (self if empty) + * @prev: previous entry (self if empty) + * + * This is used as an entry in a linked list. + * Example: + * struct child { + * const char *name; + * // Linked list of all us children. + * struct list_node list; + * }; + */ +struct list_node { + struct list_node *next, *prev; +}; + +/** + * struct list_head - the head of a doubly-linked list + * @h: the list_head (containing next and prev pointers) + * + * This is used as the head of a linked list. + * Example: + * struct parent { + * const char *name; + * struct list_head children; + * unsigned int num_children; + * }; + */ +struct list_head { + struct list_node n; +}; + +/** + * list_check - check head of a list for consistency + * @h: the list_head + * @abortstr: the location to print on aborting, or NULL. + * + * Because list_nodes have redundant information, consistency checking between + * the back and forward links can be done. This is useful as a debugging check. + * If @abortstr is non-NULL, that will be printed in a diagnostic if the list + * is inconsistent, and the function will abort. + * + * Returns the list head if the list is consistent, NULL if not (it + * can never return NULL if @abortstr is set). + * + * See also: list_check_node() + * + * Example: + * static void dump_parent(struct parent *p) + * { + * struct child *c; + * + * printf("%s (%u children):\n", p->name, p->num_children); + * list_check(&p->children, "bad child list"); + * list_for_each(&p->children, c, list) + * printf(" -> %s\n", c->name); + * } + */ +struct list_head *list_check(const struct list_head *h, const char *abortstr); + +/** + * list_check_node - check node of a list for consistency + * @n: the list_node + * @abortstr: the location to print on aborting, or NULL. + * + * Check consistency of the list node is in (it must be in one). + * + * See also: list_check() + * + * Example: + * static void dump_child(const struct child *c) + * { + * list_check_node(&c->list, "bad child list"); + * printf("%s\n", c->name); + * } + */ +struct list_node *list_check_node(const struct list_node *n, const char *abortstr); + +// #ifdef DEBUG +// #define list_debug(h) list_check((h), __func__) +// #define list_debug_node(n) list_check_node((n), __func__) +// #else +#define list_debug(h) (h) +#define list_debug_node(n) (n) +// #endif + +/** + * LIST_HEAD_INIT - initializer for an empty list_head + * @name: the name of the list. + * + * Explicit initializer for an empty list. + * + * See also: + * LIST_HEAD, list_head_init() + * + * Example: + * static struct list_head my_list = LIST_HEAD_INIT(my_list); + */ +#define LIST_HEAD_INIT(name) \ + { \ + { \ + &name.n, &name.n \ + } \ + } + +/** + * DEFINE_LIST_HEAD - define and initialize an empty list_head + * @name: the name of the list. + * + * The DEFINE_LIST_HEAD macro defines a list_head and initializes it to an empty + * list. It can be prepended by "static" to define a static list_head. + * + * See also: + * LIST_HEAD_INIT, list_head_init() + * + * Example: + * static DEFINE_LIST_HEAD(my_global_list); + */ +#define DEFINE_LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name) + +/** + * list_head_init - initialize a list_head + * @h: the list_head to set to the empty list + * + * Example: + * ... 
+ * struct parent *parent = malloc(sizeof(*parent)); + * + * list_head_init(&parent->children); + * parent->num_children = 0; + */ +static inline void list_head_init(struct list_head *h) +{ + h->n.next = h->n.prev = &h->n; +} + +/** + * list_add - add an entry at the start of a linked list. + * @h: the list_head to add the node to + * @n: the list_node to add to the list. + * + * The list_node does not need to be initialized; it will be overwritten. + * Example: + * struct child *child = malloc(sizeof(*child)); + * + * child->name = "marvin"; + * list_add(&parent->children, &child->list); + * parent->num_children++; + */ +static inline void list_add(struct list_head *h, struct list_node *n) +{ + n->next = h->n.next; + n->prev = &h->n; + h->n.next->prev = n; + h->n.next = n; + (void)list_debug(h); +} + +/** + * list_add_tail - add an entry at the end of a linked list. + * @h: the list_head to add the node to + * @n: the list_node to add to the list. + * + * The list_node does not need to be initialized; it will be overwritten. + * Example: + * list_add_tail(&parent->children, &child->list); + * parent->num_children++; + */ +static inline void list_add_tail(struct list_head *h, struct list_node *n) +{ + n->next = &h->n; + n->prev = h->n.prev; + h->n.prev->next = n; + h->n.prev = n; + (void)list_debug(h); +} + +static inline void __list_add(struct list_node *prev, struct list_node *n, struct list_node *next) +{ + prev->next = n; + n->prev = prev; + n->next = next; + next->prev = n; +} + +static inline void list_add_after(struct list_node *prev, struct list_node *n) +{ + __list_add(prev, n, prev->next); +} + +static inline void list_add_before(struct list_node *next, struct list_node *n) +{ + __list_add(next->prev, n, next); +} + +/** + * list_empty - is a list empty? + * @h: the list_head + * + * If the list is empty, returns true. + * + * Example: + * assert(list_empty(&parent->children) == (parent->num_children == 0)); + */ +static inline bool list_empty(const struct list_head *h) +{ + (void)list_debug(h); + return h->n.next == &h->n; +} + +/** + * list_empty_volatile - is a list empty? (forces compiler to check) + * @h: the list_head + * + * If the list is empty, returns true. + */ +static inline bool list_empty_volatile(const struct list_head *h) +{ + return ACCESS_ONCE(h->n.next) == &h->n; +} + +/** + * list_del - delete an entry from an (unknown) linked list. + * @n: the list_node to delete from the list. + * + * Note that this leaves @n in an undefined state; it can be added to + * another list, but not deleted again. + * + * See also: + * list_del_from() + * + * Example: + * list_del(&child->list); + * parent->num_children--; + */ +static inline void list_del(struct list_node *n) +{ + (void)list_debug_node(n); + n->next->prev = n->prev; + n->prev->next = n->next; +#ifdef DEBUG + /* Catch use-after-del. */ + n->next = n->prev = NULL; +#endif +} + +/** + * list_del_from - delete an entry from a known linked list. + * @h: the list_head the node is in. + * @n: the list_node to delete from the list. + * + * This explicitly indicates which list a node is expected to be in, + * which is better documentation and can catch more bugs. + * + * See also: list_del() + * + * Example: + * list_del_from(&parent->children, &child->list); + * parent->num_children--; + */ +static inline void list_del_from(struct list_head *h, struct list_node *n) +{ +#ifdef DEBUG + { + /* Thorough check: make sure it was in list! 
*/ + struct list_node *i; + for (i = h->n.next; i != n; i = i->next) assert(i != &h->n); + } + assert(!list_empty(h)); +#endif /* DEBUG */ + + /* Quick test that catches a surprising number of bugs. */ + list_del(n); +} + +/** + * list_entry - convert a list_node back into the structure containing it. + * @n: the list_node + * @type: the type of the entry + * @member: the list_node member of the type + * + * Example: + * // First list entry is children.next; convert back to child. + * child = list_entry(parent->children.n.next, struct child, list); + * + * See Also: + * list_top(), list_for_each() + */ +#define list_entry(n, type, member) container_of(n, type, member) + +/** + * list_top - get the first entry in a list + * @h: the list_head + * @type: the type of the entry + * @member: the list_node member of the type + * + * If the list is empty, returns NULL. + * + * Example: + * struct child *first; + * first = list_top(&parent->children, struct child, list); + * if (!first) + * printf("Empty list!\n"); + */ +#define list_top(h, type, member) ((type *)list_top_((h), list_off_(type, member))) + +static inline const void *list_top_(const struct list_head *h, size_t off) +{ + if (list_empty(h)) + return NULL; + return (const char *)h->n.next - off; +} + +/** + * list_pop - remove the first entry in a list + * @h: the list_head + * @type: the type of the entry + * @member: the list_node member of the type + * + * If the list is empty, returns NULL. + * + * Example: + * struct child *one; + * one = list_pop(&parent->children, struct child, list); + * if (!one) + * printf("Empty list!\n"); + */ +#define list_pop(h, type, member) ((type *)list_pop_((h), list_off_(type, member))) + +static inline const void *list_pop_(const struct list_head *h, size_t off) +{ + struct list_node *n; + + if (list_empty(h)) + return NULL; + n = h->n.next; + list_del(n); + return (const char *)n - off; +} + +/** + * list_tail - get the last entry in a list + * @h: the list_head + * @type: the type of the entry + * @member: the list_node member of the type + * + * If the list is empty, returns NULL. + * + * Example: + * struct child *last; + * last = list_tail(&parent->children, struct child, list); + * if (!last) + * printf("Empty list!\n"); + */ +#define list_tail(h, type, member) ((type *)list_tail_((h), list_off_(type, member))) + +static inline const void *list_tail_(const struct list_head *h, size_t off) +{ + if (list_empty(h)) + return NULL; + return (const char *)h->n.prev - off; +} + +/** + * list_for_each - iterate through a list. + * @h: the list_head (warning: evaluated multiple times!) + * @i: the structure containing the list_node + * @member: the list_node member of the structure + * + * This is a convenient wrapper to iterate @i over the entire list. It's + * a for loop, so you can break and continue as normal. + * + * Example: + * list_for_each(&parent->children, child, list) + * printf("Name: %s\n", child->name); + */ +#define list_for_each(h, i, member) list_for_each_off(h, i, list_off_var_(i, member)) + +/** + * list_for_each_rev - iterate through a list backwards. + * @h: the list_head + * @i: the structure containing the list_node + * @member: the list_node member of the structure + * + * This is a convenient wrapper to iterate @i over the entire list. It's + * a for loop, so you can break and continue as normal. 
+ * + * Example: + * list_for_each_rev(&parent->children, child, list) + * printf("Name: %s\n", child->name); + */ +#define list_for_each_rev(h, i, member) \ + for (i = container_of_var(list_debug(h)->n.prev, i, member); &i->member != &(h)->n; \ + i = container_of_var(i->member.prev, i, member)) + +/** + * list_for_each_safe - iterate through a list, maybe during deletion + * @h: the list_head + * @i: the structure containing the list_node + * @nxt: the structure containing the list_node + * @member: the list_node member of the structure + * + * This is a convenient wrapper to iterate @i over the entire list. It's + * a for loop, so you can break and continue as normal. The extra variable + * @nxt is used to hold the next element, so you can delete @i from the list. + * + * Example: + * struct child *next; + * list_for_each_safe(&parent->children, child, next, list) { + * list_del(&child->list); + * parent->num_children--; + * } + */ +#define list_for_each_safe(h, i, nxt, member) \ + list_for_each_safe_off(h, i, nxt, list_off_var_(i, member)) + +/** + * list_next - get the next entry in a list + * @h: the list_head + * @i: a pointer to an entry in the list. + * @member: the list_node member of the structure + * + * If @i was the last entry in the list, returns NULL. + * + * Example: + * struct child *second; + * second = list_next(&parent->children, first, list); + * if (!second) + * printf("No second child!\n"); + */ +#define list_next(h, i, member) \ + ((list_typeof(i))list_entry_or_null(list_debug(h), (i)->member.next, \ + list_off_var_((i), member))) + +/** + * list_prev - get the previous entry in a list + * @h: the list_head + * @i: a pointer to an entry in the list. + * @member: the list_node member of the structure + * + * If @i was the first entry in the list, returns NULL. + * + * Example: + * first = list_prev(&parent->children, second, list); + * if (!first) + * printf("Can't go back to first child?!\n"); + */ +#define list_prev(h, i, member) \ + ((list_typeof(i))list_entry_or_null(list_debug(h), (i)->member.prev, \ + list_off_var_((i), member))) + +/** + * list_append_list - empty one list onto the end of another. + * @to: the list to append into + * @from: the list to empty. + * + * This takes the entire contents of @from and moves it to the end of + * @to. After this @from will be empty. + * + * Example: + * struct list_head adopter; + * + * list_append_list(&adopter, &parent->children); + * assert(list_empty(&parent->children)); + * parent->num_children = 0; + */ +static inline void list_append_list(struct list_head *to, struct list_head *from) +{ + struct list_node *from_tail = list_debug(from)->n.prev; + struct list_node *to_tail = list_debug(to)->n.prev; + + /* Sew in head and entire list. */ + to->n.prev = from_tail; + from_tail->next = &to->n; + to_tail->next = &from->n; + from->n.prev = to_tail; + + /* Now remove head. */ + list_del(&from->n); + list_head_init(from); +} + +/** + * list_prepend_list - empty one list into the start of another. + * @to: the list to prepend into + * @from: the list to empty. + * + * This takes the entire contents of @from and moves it to the start + * of @to. After this @from will be empty. 
+ * + * Example: + * list_prepend_list(&adopter, &parent->children); + * assert(list_empty(&parent->children)); + * parent->num_children = 0; + */ +static inline void list_prepend_list(struct list_head *to, struct list_head *from) +{ + struct list_node *from_tail = list_debug(from)->n.prev; + struct list_node *to_head = list_debug(to)->n.next; + + /* Sew in head and entire list. */ + to->n.next = &from->n; + from->n.prev = &to->n; + to_head->prev = from_tail; + from_tail->next = to_head; + + /* Now remove head. */ + list_del(&from->n); + list_head_init(from); +} + +/** + * list_for_each_off - iterate through a list of memory regions. + * @h: the list_head + * @i: the pointer to a memory region wich contains list node data. + * @off: offset(relative to @i) at which list node data resides. + * + * This is a low-level wrapper to iterate @i over the entire list, used to + * implement all oher, more high-level, for-each constructs. It's a for loop, + * so you can break and continue as normal. + * + * WARNING! Being the low-level macro that it is, this wrapper doesn't know + * nor care about the type of @i. The only assumtion made is that @i points + * to a chunk of memory that at some @offset, relative to @i, contains a + * properly filled `struct node_list' which in turn contains pointers to + * memory chunks and it's turtles all the way down. Whith all that in mind + * remember that given the wrong pointer/offset couple this macro will + * happilly churn all you memory untill SEGFAULT stops it, in other words + * caveat emptor. + * + * It is worth mentioning that one of legitimate use-cases for that wrapper + * is operation on opaque types with known offset for `struct list_node' + * member(preferably 0), because it allows you not to disclose the type of + * @i. + * + * Example: + * list_for_each_off(&parent->children, child, + * offsetof(struct child, list)) + * printf("Name: %s\n", child->name); + */ +#define list_for_each_off(h, i, off) \ + for (i = list_node_to_off_(list_debug(h)->n.next, (off)); \ + list_node_from_off_((void *)i, (off)) != &(h)->n; \ + i = list_node_to_off_(list_node_from_off_((void *)i, (off))->next, (off))) + +/** + * list_for_each_safe_off - iterate through a list of memory regions, maybe + * during deletion + * @h: the list_head + * @i: the pointer to a memory region wich contains list node data. + * @nxt: the structure containing the list_node + * @off: offset(relative to @i) at which list node data resides. + * + * For details see `list_for_each_off' and `list_for_each_safe' + * descriptions. + * + * Example: + * list_for_each_safe_off(&parent->children, child, + * next, offsetof(struct child, list)) + * printf("Name: %s\n", child->name); + */ +#define list_for_each_safe_off(h, i, nxt, off) \ + for (i = list_node_to_off_(list_debug(h)->n.next, (off)), \ + nxt = list_node_to_off_(list_node_from_off_(i, (off))->next, (off)); \ + list_node_from_off_(i, (off)) != &(h)->n; \ + i = nxt, nxt = list_node_to_off_(list_node_from_off_(i, (off))->next, (off))) + +/* Other -off variants. 
*/ +#define list_entry_off(n, type, off) ((type *)list_node_from_off_((n), (off))) + +#define list_head_off(h, type, off) ((type *)list_head_off((h), (off))) + +#define list_tail_off(h, type, off) ((type *)list_tail_((h), (off))) + +#define list_add_off(h, n, off) list_add((h), list_node_from_off_((n), (off))) + +#define list_del_off(n, off) list_del(list_node_from_off_((n), (off))) + +#define list_del_from_off(h, n, off) list_del_from(h, list_node_from_off_((n), (off))) + +/* Offset helper functions so we only single-evaluate. */ +static inline void *list_node_to_off_(struct list_node *node, size_t off) +{ + return (void *)((char *)node - off); +} +static inline struct list_node *list_node_from_off_(void *ptr, size_t off) +{ + return (struct list_node *)((char *)ptr + off); +} + +/* Get the offset of the member, but make sure it's a list_node. */ +#define list_off_(type, member) \ + (offsetof(type, member) + check_type(((type *)0)->member, struct list_node)) + +#define list_off_var_(var, member) \ + (offsetof(typeof(*var), member) + check_type(var->member, struct list_node)) + +#define list_typeof(var) typeof(var) + +/* Returns member, or NULL if at end of list. */ +static inline void *list_entry_or_null(const struct list_head *h, const struct list_node *n, + size_t off) +{ + if (n == &h->n) + return NULL; + return (char *)n - off; +} + +struct slist_node { + struct slist_node *next; +}; + +struct slist_head { + struct slist_node head; +}; + +static inline void slist_init_head(struct slist_head *h) +{ + h->head.next = NULL; +} + +static inline void slist_add_head(struct slist_head *h, struct slist_node *n) +{ + n->next = h->head.next; + h->head.next = n; +} + +static inline void slist_del_head(struct slist_head *h) +{ + h->head.next = h->head.next->next; +} + +static inline void slist_del(struct slist_node *prev, struct slist_node *n) +{ + prev->next = n->next; +} + +static inline bool slist_empty(const struct slist_head *h) +{ + return h->head.next == NULL; +} + +/* Get the offset of the member, but make sure it's a slist_node. 
*/ +#define slist_off_(type, member) \ + (offsetof(type, member) + check_type(((type *)0)->member, struct slist_node)) + +#define slist_pop(h, type, member) ((type *)slist_pop_((h), slist_off_(type, member))) + +static inline const void *slist_pop_(struct slist_head *h, size_t off) +{ + struct slist_node *n; + + if (slist_empty(h)) + return NULL; + n = h->head.next; + slist_del(&h->head, n); + return (const char *)n - off; +} + +#define slist_entry(n, type, member) container_of(n, type, member) + +#define slist_for_each(h, pos) for ((pos) = (h)->head.next; (pos); (pos) = (pos)->next) + +#define slist_for_each_prev(h, pos, ppos) \ + for ((ppos) = &(h)->head; ((pos) = ((ppos)->next)) != NULL; (ppos) = (ppos)->next) + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +struct hlist_head { + struct hlist_node *head; +}; + +static inline void hlist_init_head(struct hlist_head *h) +{ + h->head = NULL; +} + +static inline void hlist_add_head(struct hlist_head *h, struct hlist_node *n) +{ + struct hlist_node *head = h->head; + n->next = head; + n->pprev = &h->head; + h->head = n; + if (head) + head->pprev = &n->next; +} + +static inline void hlist_del(struct hlist_node *n) +{ + *n->pprev = n->next; + if (n->next) + n->next->pprev = n->pprev; +} + +static inline void hlist_del_head(struct hlist_head *h) +{ + if (h->head) + hlist_del(h->head); +} + +static inline bool hlist_empty(struct hlist_head *h) +{ + return h->head == NULL; +} + +#define hlist_entry(n, type, member) container_of(n, type, member) + +#define hlist_for_each(h, pos) for ((pos) = (h)->head; (pos); (pos) = (pos)->next) + +#define hlist_for_each_safe(h, pos, tmp) \ + for ((pos) = (h)->head; (pos) && ((tmp) = (pos)->next, 1); (pos) = (tmp)) diff --git a/utils/include/utils/log.h b/utils/include/utils/log.h new file mode 100644 index 0000000..e1fccfc --- /dev/null +++ b/utils/include/utils/log.h @@ -0,0 +1,101 @@ +/* + * log.h - the logging service + */ + +#pragma once + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +extern void logk(int level, const char *fmt, ...) __attribute__((__format__(__printf__, 2, 3))); +extern void logk_backtrace(void); + +#if defined(LOG_LEVEL_DEBUG) +#define MAX_LOG_LEVEL LOG_DEBUG +#elif defined(LOG_LEVEL_INFO) +#define MAX_LOG_LEVEL LOG_INFO +#elif defined(LOG_LEVEL_NOTICE) +#define MAX_LOG_LEVEL LOG_NOTICE +#elif defined(LOG_LEVEL_WARN) +#define MAX_LOG_LEVEL LOG_WARN +#elif defined(LOG_LEVEL_ERR) +#define MAX_LOG_LEVEL LOG_ERR +#elif defined(LOG_LEVEL_CRIT) +#define MAX_LOG_LEVEL LOG_CRIT +#else +#define MAX_LOG_LEVEL LOG_CRIT +#endif + +#define do_logk(level, fmt, ...) \ + do { \ + if (level <= MAX_LOG_LEVEL) \ + logk(level, fmt, ##__VA_ARGS__); \ + } while (0) + +/* forces format checking */ +#define no_logk(level, fmt, ...) \ + do { \ + if (0) \ + logk(level, fmt, ##__VA_ARGS__); \ + } while (0) + +enum { + LOG_CRIT = 1, /* critical */ + LOG_ERR = 2, /* error */ + LOG_WARN = 3, /* warning */ + LOG_NOTICE = 4, /* significant normal condition */ + LOG_INFO = 5, /* informational */ + LOG_DEBUG = 6, /* debug */ +}; + +#define log_crit(fmt, ...) do_logk(LOG_CRIT, fmt, ##__VA_ARGS__) +#define log_err(fmt, ...) do_logk(LOG_ERR, fmt, ##__VA_ARGS__) +#define log_warn(fmt, ...) do_logk(LOG_WARN, fmt, ##__VA_ARGS__) +#define log_notice(fmt, ...) do_logk(LOG_NOTICE, fmt, ##__VA_ARGS__) +#define log_info(fmt, ...) do_logk(LOG_INFO, fmt, ##__VA_ARGS__) +#define log_debug(fmt, ...) do_logk(LOG_DEBUG, fmt, ##__VA_ARGS__) + +#define log_once(level, fmt, ...) 
\ + ({ \ + static bool __once; \ + if (unlikely(!__once)) { \ + __once = true; \ + do_logk(level, fmt, ##__VA_ARGS__); \ + } \ + }) + +#define log_crit_once(fmt, ...) log_once(LOG_CRIT, fmt, ##__VA_ARGS__) +#define log_err_once(fmt, ...) log_once(LOG_ERR, fmt, ##__VA_ARGS__) +#define log_warn_once(fmt, ...) log_once(LOG_WARN, fmt, ##__VA_ARGS__) +#define log_notice_once(fmt, ...) log_once(LOG_NOTICE, fmt, ##__VA_ARGS__) +#define log_info_once(fmt, ...) log_once(LOG_INFO, fmt, ##__VA_ARGS__) +#define log_debug_once(fmt, ...) log_once(LOG_DEBUG, fmt, ##__VA_ARGS__) + +#define log_first_n(level, num, fmt, ...) \ + ({ \ + static int __n = (num); \ + if (__n > 0) { \ + __n--; \ + do_logk(level, fmt, ##__VA_ARGS__); \ + } \ + }) + +#define log_crit_first_n(num, fmt, ...) log_first_n(LOG_CRIT, num, fmt, ##__VA_ARGS__) +#define log_err_first_n(num, fmt, ...) log_first_n(LOG_ERR, num, fmt, ##__VA_ARGS__) +#define log_warn_first_n(num, fmt, ...) log_first_n(LOG_WARN, num, fmt, ##__VA_ARGS__) +#define log_notice_first_n(num, fmt, ...) log_first_n(LOG_NOTICE, num, fmt, ##__VA_ARGS__) +#define log_info_first_n(num, fmt, ...) log_first_n(LOG_INFO, num, fmt, ##__VA_ARGS__) +#define log_debug_first_n(num, fmt, ...) log_first_n(LOG_DEBUG, num, fmt, ##__VA_ARGS__) + +#define panic(fmt, ...) \ + do { \ + logk(LOG_CRIT, fmt, ##__VA_ARGS__); \ + exit(EXIT_FAILURE); \ + } while (0) + +#ifdef __cplusplus +} +#endif diff --git a/utils/include/utils/lrpc.h b/utils/include/utils/lrpc.h new file mode 100644 index 0000000..d1e936c --- /dev/null +++ b/utils/include/utils/lrpc.h @@ -0,0 +1,131 @@ +/* + * lrpc.h - shared memory communication channels + * + * This design is inspired by Barrelfish, which in turn was based on Brian + * Bershad's earlier LRPC work. The goal here is to minimize cache misses to + * the maximum extent possible. + */ + +#pragma once + +#include +#include +#include +#include + +struct lrpc_msg { + uint64_t cmd; + uint64_t payload; +}; + +#define LRPC_DONE_PARITY (1UL << 63) +#define LRPC_CMD_MASK (~LRPC_DONE_PARITY) + +/* + * Ingress/Egress Channel Support + */ + +struct lrpc_chan_out { + struct lrpc_msg *tbl; + uint32_t size; + uint32_t mask; + uint32_t head; + uint32_t tail; +}; + +struct lrpc_chan_in { + struct lrpc_msg *tbl; + uint32_t size; + uint32_t mask; + uint32_t head; + volatile uint32_t head_wb; +}; + +struct lrpc_chan { + struct lrpc_chan_out out; + uint64_t pad0[5]; + struct lrpc_chan_in in; + uint64_t pad1[5]; +}; + +BUILD_ASSERT(offsetof(struct lrpc_chan, in) == 64); + +/** + * lrpc_send - sends a message on the channel + * @chan: the ingress/egress channel + * @cmd: the command to send + * @payload: the data payload + * + * Returns true if successful, otherwise the channel is full. + */ +static inline bool lrpc_send(struct lrpc_chan *chan, uint64_t cmd, uint64_t payload) +{ + struct lrpc_chan_out *out = &chan->out; + struct lrpc_msg *dst; + + assert(!(cmd & LRPC_DONE_PARITY)); + + if (unlikely(out->tail - out->head >= out->size)) { + out->head = atomic_load_acq(&chan->in.head_wb); + if (out->tail - out->head == out->size) { + return false; + } + } + + dst = &out->tbl[out->tail & out->mask]; + cmd |= (out->tail++ & out->size) ? 
0 : LRPC_DONE_PARITY; + dst->payload = payload; + atomic_store_rel(&dst->cmd, cmd); + return true; +} + +/** + * lrpc_recv - receives a message on the channel + * @chan: the ingress/egress channel + * @cmd_out: a pointer to store the received command + * @payload_out: a pointer to store the received payload + * + * Returns true if successful, otherwise the channel is empty. + */ +static inline bool lrpc_recv(struct lrpc_chan *chan, uint64_t *cmd_out, uint64_t *payload_out) +{ + struct lrpc_chan_in *in = &chan->in; + struct lrpc_msg *m = &in->tbl[in->head & in->mask]; + uint64_t parity = (in->head & in->size) ? 0 : LRPC_DONE_PARITY; + uint64_t cmd; + + cmd = atomic_load_acq(&m->cmd); + if ((cmd & LRPC_DONE_PARITY) != parity) + return false; + in->head++; + + *cmd_out = cmd & LRPC_CMD_MASK; + *payload_out = m->payload; + atomic_store_rel(&in->head_wb, in->head); + return true; +} + +/** + * lrpc_get_cached_length - retrieves the number of queued messages + * @chan: the ingress/egress channel + * + * Returns the number of messages queued in the channel. + */ +static inline uint32_t lrpc_size(struct lrpc_chan *chan) +{ + return chan->out.tail - chan->out.head; +} + +/** + * lrpc_empty - returns true if the channel has no available messages + * @chan: the ingress channel + */ +static inline bool lrpc_empty(struct lrpc_chan *chan) +{ + struct lrpc_chan_in *in = &chan->in; + struct lrpc_msg *m = &in->tbl[in->head & in->mask]; + uint64_t parity = (in->head & in->size) ? 0 : LRPC_DONE_PARITY; + return (ACCESS_ONCE(m->cmd) & LRPC_DONE_PARITY) != parity; +} + +int lrpc_init(struct lrpc_chan *chan, struct lrpc_msg *tbl, unsigned int size); diff --git a/utils/include/utils/msgq.h b/utils/include/utils/msgq.h new file mode 100644 index 0000000..bd702ab --- /dev/null +++ b/utils/include/utils/msgq.h @@ -0,0 +1,99 @@ +/* + * msgq.h: simple message queue with one reader and one writer + */ + +#pragma once + +#include + +#include +#include +#include +#include +#include + +#define MSGQ_SIZE 4096 +#define MSGQ_MASK (MSGQ_SIZE - 1) +#define MSGQ_DONE_PARITY (1UL << 63) +#define MSGQ_CMD_MASK (~MSGQ_DONE_PARITY) + +struct msg { + uint64_t cmd; + uint64_t payload; +}; + +struct msgq { + struct msg *data; + uint32_t head, tail; +}; + +static inline bool msgq_empty(struct msgq *q) +{ + struct msg *m; + uint64_t parity; + + m = &q->data[q->head & MSGQ_MASK]; + parity = (q->head & MSGQ_SIZE) ? 0 : MSGQ_DONE_PARITY; + return (ACCESS_ONCE(m->cmd) & MSGQ_DONE_PARITY) != parity; +} + +/** + * msgq_send - sends a message + * @msgq: the ingress/egress queue + * @cmd: the command to send + * @payload: the data payload + * + * Returns true if successful, otherwise the channel is full. + */ +static inline bool msgq_send(struct msgq *q, uint64_t cmd, uint64_t payload) +{ + struct msg *dst; + + if (unlikely(q->tail >= q->head + MSGQ_SIZE)) + return false; + + dst = &q->data[q->tail & MSGQ_MASK]; + cmd |= (q->tail++ & MSGQ_SIZE) ? 0 : MSGQ_DONE_PARITY; + dst->payload = payload; + atomic_store_rel(&dst->cmd, cmd); + return true; +} + +/** + * msgq_recv - receives a message + * @msgq: the ingress/egress queue + * @cmd_out: a pointer to store the received command + * @payload_out: a pointer to store the received payload + * + * Returns true if successful, otherwise the queue is empty. + */ +static inline bool msgq_recv(struct msgq *q, uint64_t *cmd_out, uint64_t *payload_out) +{ + struct msg *m; + uint64_t parity, cmd; + + m = &q->data[q->head & MSGQ_MASK]; + parity = (q->head & MSGQ_SIZE) ? 
0 : MSGQ_DONE_PARITY; + cmd = atomic_load_acq(&m->cmd); + if ((cmd & MSGQ_DONE_PARITY) != parity) + return false; + q->head++; + + *cmd_out = m->cmd & MSGQ_CMD_MASK; + *payload_out = m->payload; + return true; +} + +/** + * msgq_init - initializes a message queue + * @q: the message queue struct to initialize + * @size: the number of message elements in the buffer + * + * returns 0 if successful, or -EINVAL if @size is not a power of two. + */ +static inline int msgq_init(struct msgq *q, void *ptr) +{ + memset(q, 0, sizeof(struct msgq)); + q->data = ptr; + return 0; +} \ No newline at end of file diff --git a/utils/include/utils/ops.h b/utils/include/utils/ops.h new file mode 100644 index 0000000..2fdfaa6 --- /dev/null +++ b/utils/include/utils/ops.h @@ -0,0 +1,18 @@ +/* + * ops.h - useful x86_64 instructions + */ + +#pragma once + +#include + +static inline void cpu_relax(void) +{ +#if __GNUC_PREREQ(10, 0) +#if __has_builtin(__builtin_ia32_pause) + __builtin_ia32_pause(); +#endif +#else + asm volatile("pause"); +#endif +} diff --git a/utils/include/utils/queue.h b/utils/include/utils/queue.h new file mode 100644 index 0000000..ace0f34 --- /dev/null +++ b/utils/include/utils/queue.h @@ -0,0 +1,112 @@ +/* + * queue.h: lock free queue implementation. + * - queue_t: simple FIFO queue with no thread safety + * - spsc_queue_t: Single-Producer-Single-Consumer queue + */ + +#ifndef _UTILS_QUEUE_H_ +#define _UTILS_QUEUE_H_ + +#include +#include +#include +#include +#include + +#include + +#define QUEUE_CAP (1 << 20) +#define QUEUE_CAP_MASK (QUEUE_CAP - 1) + +#define queue_init(queue) \ + do { \ + (queue)->head = (queue)->tail = 0; \ + } while (0) + +/////////////////// naive queue /////////////////// + +typedef struct { + unsigned int head; + unsigned int tail; + void *buf[QUEUE_CAP]; +} queue_t; + +static inline int queue_len(queue_t *queue) { return queue->tail - queue->head; } + +static inline bool queue_is_empty(queue_t *queue) { return queue_len(queue) == 0; } + +static inline bool queue_is_full(queue_t *queue) { return queue_len(queue) >= QUEUE_CAP; } + +static inline void *queue_head(queue_t *queue) { return queue->buf[queue->head & QUEUE_CAP_MASK]; } + +static inline int queue_push(queue_t *queue, void *item) +{ + if (queue_is_full(queue)) { + return -EOVERFLOW; + } + queue->buf[queue->tail & QUEUE_CAP_MASK] = item; + queue->tail++; + return 0; +} + +static inline void *queue_pop(queue_t *queue) +{ + if (queue_is_empty(queue)) { + return NULL; + } + void *item = queue_head(queue); + queue->head++; + return item; +} + +//////////////// SPSC queue //////////////// + +struct spsc_queue_t { + volatile unsigned int head; + volatile unsigned int tail; + void *buf[QUEUE_CAP]; +}; + +static inline int spsc_queue_len(struct spsc_queue_t *queue) +{ + return atomic_load(&(queue)->tail) - atomic_load(&(queue)->head); +} + +static inline bool spsc_queue_is_empty(struct spsc_queue_t *queue) +{ + return spsc_queue_len(queue) == 0; +} + +static inline bool spsc_queue_is_full(struct spsc_queue_t *queue) +{ + return spsc_queue_len(queue) >= QUEUE_CAP; +} + +static inline void *spsc_queue_head(struct spsc_queue_t *queue) +{ + return queue->buf[atomic_load_relax(&queue->head) & QUEUE_CAP_MASK]; +} + +static inline int spsc_queue_push(struct spsc_queue_t *queue, void *item) +{ + unsigned int tail = atomic_load_relax(&queue->tail); + if (tail == atomic_load_acq(&queue->head) + QUEUE_CAP) { + return -EOVERFLOW; + } + queue->buf[tail & QUEUE_CAP_MASK] = item; + atomic_store_rel(&queue->tail, tail + 1); + return 
0; +} + +static inline void *spsc_queue_pop(struct spsc_queue_t *queue) +{ + unsigned int head = atomic_load_relax(&queue->head); + if (head == atomic_load_acq(&queue->tail)) { + return NULL; + } + void *item = queue->buf[head & QUEUE_CAP_MASK]; + atomic_store_rel(&queue->head, head + 1); + return item; +} + +#endif // _UTILS_QUEUE_H_ diff --git a/utils/include/utils/rbtree.h b/utils/include/utils/rbtree.h new file mode 100644 index 0000000..9e60019 --- /dev/null +++ b/utils/include/utils/rbtree.h @@ -0,0 +1,678 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + + + linux/include/linux/rbtree.h + + To use rbtrees you'll have to implement your own insert and search cores. + This will avoid us to use callbacks and to drop drammatically performances. + I know it's not the cleaner way, but in C (not in C++) to get + performances and genericity... + + See Documentation/core-api/rbtree.rst for documentation and samples. +*/ + +#pragma once + +#include + +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); +/* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root { + struct rb_node *rb_node; +}; + +/* + * Leftmost-cached rbtrees. + * + * We do not cache the rightmost node based on footprint + * size vs number of potential users that could benefit + * from O(1) rb_last(). Just not worth it, users that want + * this feature can always implement the logic explicitly. + * Furthermore, users that want to cache both pointers may + * find it a bit asymmetric, but that's ok. + */ +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +#define RB_ROOT \ + (struct rb_root) \ + { \ + NULL, \ + } +#define RB_ROOT_CACHED \ + (struct rb_root_cached) \ + { \ + { \ + NULL, \ + }, \ + NULL \ + } + +#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) + +#define rb_entry(ptr, type, member) container_of(ptr, type, member) + +#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) + +/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ +#define RB_EMPTY_NODE(node) ((node)->__rb_parent_color == (unsigned long)(node)) +#define RB_CLEAR_NODE(node) ((node)->__rb_parent_color = (unsigned long)(node)) + +void rb_insert_color(struct rb_node *, struct rb_root *); +void rb_erase(struct rb_node *, struct rb_root *); + +/* Find logical next and previous nodes in a tree */ +struct rb_node *rb_next(const struct rb_node *); +struct rb_node *rb_prev(const struct rb_node *); +struct rb_node *rb_first(const struct rb_root *); +struct rb_node *rb_last(const struct rb_root *); + +/* Postorder iteration - always visit the parent after its children */ +struct rb_node *rb_first_postorder(const struct rb_root *); +struct rb_node *rb_next_postorder(const struct rb_node *); + +/* Fast replacement of a single node without remove/rebalance/add/rebalance */ +void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); +// void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, struct rb_root *root); + +static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, + struct rb_node **rb_link) +{ + node->__rb_parent_color = (unsigned long)parent; + node->rb_left = node->rb_right = NULL; + + *rb_link = node; +} + +// static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, +// struct rb_node **rb_link) 
+// { +// node->__rb_parent_color = (unsigned long)parent; +// node->rb_left = node->rb_right = NULL; + +// rcu_assign_pointer(*rb_link, node); +// } + +#define rb_entry_safe(ptr, type, member) \ + ({ \ + typeof(ptr) ____ptr = (ptr); \ + ____ptr ? rb_entry(____ptr, type, member) : NULL; \ + }) + +/** + * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of + * given type allowing the backing memory of @pos to be invalidated + * + * @pos: the 'type *' to use as a loop cursor. + * @n: another 'type *' to use as temporary storage + * @root: 'rb_root *' of the rbtree. + * @field: the name of the rb_node field within 'type'. + * + * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as + * list_for_each_entry_safe() and allows the iteration to continue independent + * of changes to @pos by the body of the loop. + * + * Note, however, that it cannot handle other modifications that re-order the + * rbtree it is iterating over. This includes calling rb_erase() on @pos, as + * rb_erase() may rebalance the tree, causing us to miss some nodes. + */ +#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ + for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \ + pos && ({ \ + n = rb_entry_safe(rb_next_postorder(&pos->field), typeof(*pos), field); \ + 1; \ + }); \ + pos = n) + +/* Same as rb_first(), but O(1) */ +#define rb_first_cached(root) (root)->rb_leftmost + +static inline void rb_insert_color_cached(struct rb_node *node, struct rb_root_cached *root, + bool leftmost) +{ + if (leftmost) + root->rb_leftmost = node; + rb_insert_color(node, &root->rb_root); +} + +static inline struct rb_node *rb_erase_cached(struct rb_node *node, struct rb_root_cached *root) +{ + struct rb_node *leftmost = NULL; + + if (root->rb_leftmost == node) + leftmost = root->rb_leftmost = rb_next(node); + + rb_erase(node, &root->rb_root); + + return leftmost; +} + +static inline void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, + struct rb_root_cached *root) +{ + if (root->rb_leftmost == victim) + root->rb_leftmost = new; + rb_replace_node(victim, new, &root->rb_root); +} + +/* + * The below helper functions use 2 operators with 3 different + * calling conventions. The operators are related like: + * + * comp(a->key,b) < 0 := less(a,b) + * comp(a->key,b) > 0 := less(b,a) + * comp(a->key,b) == 0 := !less(a,b) && !less(b,a) + * + * If these operators define a partial order on the elements we make no + * guarantee on which of the elements matching the key is found. See + * rb_find(). + * + * The reason for this is to allow the find() interface without requiring an + * on-stack dummy object, which might not be feasible due to object size. + */ + +/** + * rb_add_cached() - insert @node into the leftmost cached tree @tree + * @node: node to insert + * @tree: leftmost cached tree to insert @node into + * @less: operator defining the (partial) node order + * + * Returns @node when it is the new leftmost, or NULL. 
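+ *
+ * Example (sketch; struct task and its deadline field are hypothetical):
+ *   static bool deadline_less(struct rb_node *a, const struct rb_node *b)
+ *   {
+ *       const struct task *ta = rb_entry(a, struct task, node);
+ *       const struct task *tb = rb_entry((struct rb_node *)b, struct task, node);
+ *       return ta->deadline < tb->deadline;
+ *   }
+ *   ...
+ *   rb_add_cached(&t->node, &my_tree, deadline_less);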
+ */ +static __always_inline struct rb_node * +rb_add_cached(struct rb_node *node, struct rb_root_cached *tree, + bool (*less)(struct rb_node *, const struct rb_node *)) +{ + struct rb_node **link = &tree->rb_root.rb_node; + struct rb_node *parent = NULL; + bool leftmost = true; + + while (*link) { + parent = *link; + if (less(node, parent)) { + link = &parent->rb_left; + } else { + link = &parent->rb_right; + leftmost = false; + } + } + + rb_link_node(node, parent, link); + rb_insert_color_cached(node, tree, leftmost); + + return leftmost ? node : NULL; +} + +/** + * rb_add() - insert @node into @tree + * @node: node to insert + * @tree: tree to insert @node into + * @less: operator defining the (partial) node order + */ +static __always_inline void rb_add(struct rb_node *node, struct rb_root *tree, + bool (*less)(struct rb_node *, const struct rb_node *)) +{ + struct rb_node **link = &tree->rb_node; + struct rb_node *parent = NULL; + + while (*link) { + parent = *link; + if (less(node, parent)) + link = &parent->rb_left; + else + link = &parent->rb_right; + } + + rb_link_node(node, parent, link); + rb_insert_color(node, tree); +} + +/** + * rb_find_add() - find equivalent @node in @tree, or add @node + * @node: node to look-for / insert + * @tree: tree to search / modify + * @cmp: operator defining the node order + * + * Returns the rb_node matching @node, or NULL when no match is found and @node + * is inserted. + */ +static __always_inline struct rb_node *rb_find_add(struct rb_node *node, struct rb_root *tree, + int (*cmp)(struct rb_node *, + const struct rb_node *)) +{ + struct rb_node **link = &tree->rb_node; + struct rb_node *parent = NULL; + int c; + + while (*link) { + parent = *link; + c = cmp(node, parent); + + if (c < 0) + link = &parent->rb_left; + else if (c > 0) + link = &parent->rb_right; + else + return parent; + } + + rb_link_node(node, parent, link); + rb_insert_color(node, tree); + return NULL; +} + +/** + * rb_find() - find @key in tree @tree + * @key: key to match + * @tree: tree to search + * @cmp: operator defining the node order + * + * Returns the rb_node matching @key or NULL. + */ +static __always_inline struct rb_node *rb_find(const void *key, const struct rb_root *tree, + int (*cmp)(const void *key, const struct rb_node *)) +{ + struct rb_node *node = tree->rb_node; + + while (node) { + int c = cmp(key, node); + + if (c < 0) + node = node->rb_left; + else if (c > 0) + node = node->rb_right; + else + return node; + } + + return NULL; +} + +/** + * rb_find_first() - find the first @key in @tree + * @key: key to match + * @tree: tree to search + * @cmp: operator defining node order + * + * Returns the leftmost node matching @key, or NULL. + */ +static __always_inline struct rb_node *rb_find_first(const void *key, const struct rb_root *tree, + int (*cmp)(const void *key, + const struct rb_node *)) +{ + struct rb_node *node = tree->rb_node; + struct rb_node *match = NULL; + + while (node) { + int c = cmp(key, node); + + if (c <= 0) { + if (!c) + match = node; + node = node->rb_left; + } else if (c > 0) { + node = node->rb_right; + } + } + + return match; +} + +/** + * rb_next_match() - find the next @key in @tree + * @key: key to match + * @tree: tree to search + * @cmp: operator defining node order + * + * Returns the next node matching @key, or NULL. 
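+ *
+ * Example cmp callback, also usable with rb_find_first()/rb_for_each()
+ * (sketch; struct entry and its key field are hypothetical):
+ *   static int key_cmp(const void *key, const struct rb_node *n)
+ *   {
+ *       uint32_t k = *(const uint32_t *)key;
+ *       uint32_t nk = rb_entry((struct rb_node *)n, struct entry, node)->key;
+ *       return (k < nk) ? -1 : (k > nk) ? 1 : 0;
+ *   }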
+ */ +static __always_inline struct rb_node *rb_next_match(const void *key, struct rb_node *node, + int (*cmp)(const void *key, + const struct rb_node *)) +{ + node = rb_next(node); + if (node && cmp(key, node)) + node = NULL; + return node; +} + +/** + * rb_for_each() - iterates a subtree matching @key + * @node: iterator + * @key: key to match + * @tree: tree to search + * @cmp: operator defining node order + */ +#define rb_for_each(node, key, tree, cmp) \ + for ((node) = rb_find_first((key), (tree), (cmp)); (node); \ + (node) = rb_next_match((key), (node), (cmp))) + +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + (C) 2002 David Woodhouse + (C) 2012 Michel Lespinasse + + + linux/include/linux/rbtree_augmented.h +*/ + +/* + * Please note - only struct rb_augment_callbacks and the prototypes for + * rb_insert_augmented() and rb_erase_augmented() are intended to be public. + * The rest are implementation details you are not expected to depend on. + * + * See Documentation/core-api/rbtree.rst for documentation and samples. + */ + +struct rb_augment_callbacks { + void (*propagate)(struct rb_node *node, struct rb_node *stop); + void (*copy)(struct rb_node *old, struct rb_node *new); + void (*rotate)(struct rb_node *old, struct rb_node *new); +}; + +extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); + +/* + * Fixup the rbtree and update the augmented information when rebalancing. + * + * On insertion, the user must update the augmented information on the path + * leading to the inserted node, then call rb_link_node() as usual and + * rb_insert_augmented() instead of the usual rb_insert_color() call. + * If rb_insert_augmented() rebalances the rbtree, it will callback into + * a user provided function to update the augmented information on the + * affected subtrees. + */ +static inline void rb_insert_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + __rb_insert_augmented(node, root, augment->rotate); +} + +static inline void rb_insert_augmented_cached(struct rb_node *node, struct rb_root_cached *root, + bool newleft, + const struct rb_augment_callbacks *augment) +{ + if (newleft) + root->rb_leftmost = node; + rb_insert_augmented(node, &root->rb_root, augment); +} + +static __always_inline struct rb_node * +rb_add_augmented_cached(struct rb_node *node, struct rb_root_cached *tree, + bool (*less)(struct rb_node *, const struct rb_node *), + const struct rb_augment_callbacks *augment) +{ + struct rb_node **link = &tree->rb_root.rb_node; + struct rb_node *parent = NULL; + bool leftmost = true; + + while (*link) { + parent = *link; + if (less(node, parent)) { + link = &parent->rb_left; + } else { + link = &parent->rb_right; + leftmost = false; + } + } + + rb_link_node(node, parent, link); + augment->propagate(parent, NULL); /* suboptimal */ + rb_insert_augmented_cached(node, tree, leftmost, augment); + + return leftmost ? 
node : NULL; +} + +/* + * Template for declaring augmented rbtree callbacks (generic case) + * + * RBSTATIC: 'static' or empty + * RBNAME: name of the rb_augment_callbacks structure + * RBSTRUCT: struct type of the tree nodes + * RBFIELD: name of struct rb_node field within RBSTRUCT + * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree + * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data + */ + +#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \ + static inline void RBNAME##_propagate(struct rb_node *rb, struct rb_node *stop) \ + { \ + while (rb != stop) { \ + RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \ + if (RBCOMPUTE(node, true)) \ + break; \ + rb = rb_parent(&node->RBFIELD); \ + } \ + } \ + static inline void RBNAME##_copy(struct rb_node *rb_old, struct rb_node *rb_new) \ + { \ + RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \ + RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \ + new->RBAUGMENTED = old->RBAUGMENTED; \ + } \ + static void RBNAME##_rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ + { \ + RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \ + RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \ + new->RBAUGMENTED = old->RBAUGMENTED; \ + RBCOMPUTE(old, false); \ + } \ + RBSTATIC const struct rb_augment_callbacks RBNAME = { \ + .propagate = RBNAME##_propagate, .copy = RBNAME##_copy, .rotate = RBNAME##_rotate}; + +/* + * Template for declaring augmented rbtree callbacks, + * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes. + * + * RBSTATIC: 'static' or empty + * RBNAME: name of the rb_augment_callbacks structure + * RBSTRUCT: struct type of the tree nodes + * RBFIELD: name of struct rb_node field within RBSTRUCT + * RBTYPE: type of the RBAUGMENTED field + * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree + * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar + */ + +#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, RBTYPE, RBAUGMENTED, \ + RBCOMPUTE) \ + static inline bool RBNAME##_compute_max(RBSTRUCT *node, bool exit) \ + { \ + RBSTRUCT *child; \ + RBTYPE max = RBCOMPUTE(node); \ + if (node->RBFIELD.rb_left) { \ + child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \ + if (child->RBAUGMENTED > max) \ + max = child->RBAUGMENTED; \ + } \ + if (node->RBFIELD.rb_right) { \ + child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \ + if (child->RBAUGMENTED > max) \ + max = child->RBAUGMENTED; \ + } \ + if (exit && node->RBAUGMENTED == max) \ + return true; \ + node->RBAUGMENTED = max; \ + return false; \ + } \ + RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME##_compute_max) + +#define RB_RED 0 +#define RB_BLACK 1 + +#define __rb_parent(pc) ((struct rb_node *)(pc & ~3)) + +#define __rb_color(pc) ((pc) & 1) +#define __rb_is_black(pc) __rb_color(pc) +#define __rb_is_red(pc) (!__rb_color(pc)) +#define rb_color(rb) __rb_color((rb)->__rb_parent_color) +#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color) +#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color) + +static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) +{ + rb->__rb_parent_color = rb_color(rb) + (unsigned long)p; +} + +static inline void rb_set_parent_color(struct rb_node *rb, struct rb_node *p, int color) +{ + rb->__rb_parent_color = (unsigned long)p + color; +} + +static inline void __rb_change_child(struct rb_node *old, struct rb_node *new, + struct 
rb_node *parent, struct rb_root *root) +{ + if (parent) { + if (parent->rb_left == old) + WRITE_ONCE(parent->rb_left, new); + else + WRITE_ONCE(parent->rb_right, new); + } else + WRITE_ONCE(root->rb_node, new); +} + +// static inline void __rb_change_child_rcu(struct rb_node *old, struct rb_node *new, +// struct rb_node *parent, struct rb_root *root) +// { +// if (parent) { +// if (parent->rb_left == old) +// rcu_assign_pointer(parent->rb_left, new); +// else +// rcu_assign_pointer(parent->rb_right, new); +// } else +// rcu_assign_pointer(root->rb_node, new); +// } + +extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); + +static __always_inline struct rb_node * +__rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *child = node->rb_right; + struct rb_node *tmp = node->rb_left; + struct rb_node *parent, *rebalance; + unsigned long pc; + + if (!tmp) { + /* + * Case 1: node to erase has no more than 1 child (easy!) + * + * Note that if there is one child it must be red due to 5) + * and node must be black due to 4). We adjust colors locally + * so as to bypass __rb_erase_color() later on. + */ + pc = node->__rb_parent_color; + parent = __rb_parent(pc); + __rb_change_child(node, child, parent, root); + if (child) { + child->__rb_parent_color = pc; + rebalance = NULL; + } else + rebalance = __rb_is_black(pc) ? parent : NULL; + tmp = parent; + } else if (!child) { + /* Still case 1, but this time the child is node->rb_left */ + tmp->__rb_parent_color = pc = node->__rb_parent_color; + parent = __rb_parent(pc); + __rb_change_child(node, tmp, parent, root); + rebalance = NULL; + tmp = parent; + } else { + struct rb_node *successor = child, *child2; + + tmp = child->rb_left; + if (!tmp) { + /* + * Case 2: node's successor is its right child + * + * (n) (s) + * / \ / \ + * (x) (s) -> (x) (c) + * \ + * (c) + */ + parent = successor; + child2 = successor->rb_right; + + augment->copy(node, successor); + } else { + /* + * Case 3: node's successor is leftmost under + * node's right child subtree + * + * (n) (s) + * / \ / \ + * (x) (y) -> (x) (y) + * / / + * (p) (p) + * / / + * (s) (c) + * \ + * (c) + */ + do { + parent = successor; + successor = tmp; + tmp = tmp->rb_left; + } while (tmp); + child2 = successor->rb_right; + WRITE_ONCE(parent->rb_left, child2); + WRITE_ONCE(successor->rb_right, child); + rb_set_parent(child, successor); + + augment->copy(node, successor); + augment->propagate(parent, successor); + } + + tmp = node->rb_left; + WRITE_ONCE(successor->rb_left, tmp); + rb_set_parent(tmp, successor); + + pc = node->__rb_parent_color; + tmp = __rb_parent(pc); + __rb_change_child(node, successor, tmp, root); + + if (child2) { + rb_set_parent_color(child2, parent, RB_BLACK); + rebalance = NULL; + } else { + rebalance = rb_is_black(successor) ? 
parent : NULL; + } + successor->__rb_parent_color = pc; + tmp = successor; + } + + augment->propagate(tmp, NULL); + return rebalance; +} + +static __always_inline void rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); + if (rebalance) + __rb_erase_color(rebalance, root, augment->rotate); +} + +static __always_inline void rb_erase_augmented_cached(struct rb_node *node, + struct rb_root_cached *root, + const struct rb_augment_callbacks *augment) +{ + if (root->rb_leftmost == node) + root->rb_leftmost = rb_next(node); + rb_erase_augmented(node, &root->rb_root, augment); +} diff --git a/utils/include/utils/shm.h b/utils/include/utils/shm.h new file mode 100644 index 0000000..82fd891 --- /dev/null +++ b/utils/include/utils/shm.h @@ -0,0 +1,57 @@ +/* + * shm.h - shared memory communication + */ + +#pragma once + +#include + +#include +#include + +/* + * Shared memory pointer support. These are pointers that are passed across + * address spaces, so the mapped regions will have different base offsets. + */ + +typedef uintptr_t shmptr_t; + +/* shared memory pointers have a special non-zero NULL value */ +#define SHMPTR_NULL ULONG_MAX + +struct shm_region { + void *base; + size_t len; +}; + +/** + * ptr_to_shmptr - converts a normal pointer to a shared memory pointer + * @r: the shared memory region the pointer resides in + * @ptr: the normal pointer to convert + * @len: the size of the object + * + * Returns a shared memory pointer. + */ +static inline shmptr_t ptr_to_shmptr(struct shm_region *r, void *ptr, size_t len) +{ + assert((uintptr_t)r->base <= (uintptr_t)ptr); + assert((uintptr_t)ptr + len <= (uintptr_t)r->base + r->len); + return (uintptr_t)ptr - (uintptr_t)r->base; +} + +/** + * shmptr_to_ptr - converts a shared memory pointer to a normal pointer + * @r: the shared memory region the shared memory pointer resides in + * @shmptr: the shared memory pointer + * @len: the size of the object + * + * Returns a normal pointer, or NULL if the shared memory pointer is outside + * the region. + */ +static inline void *shmptr_to_ptr(struct shm_region *r, shmptr_t shmptr, size_t len) +{ + /* WARNING: could wrap around! */ + if (unlikely(ULONG_MAX - shmptr < r->len || shmptr + len > r->len)) + return NULL; + return (void *)(shmptr + (uintptr_t)r->base); +} diff --git a/utils/include/utils/spinlock.h b/utils/include/utils/spinlock.h new file mode 100644 index 0000000..756a3de --- /dev/null +++ b/utils/include/utils/spinlock.h @@ -0,0 +1,80 @@ +/* + * lock.h - locking primitives + */ + +#pragma once + +#include +#include +#include + +typedef struct { + volatile int locked; +} spinlock_t; + +#define SPINLOCK_INITIALIZER \ + { \ + .locked = 0 \ + } +#define DEFINE_SPINLOCK(name) spinlock_t name = SPINLOCK_INITIALIZER +#define DECLARE_SPINLOCK(name) extern spinlock_t name + +/** + * spin_lock_init - prepares a spin lock for use + * @l: the spin lock + */ +static inline void spin_lock_init(spinlock_t *l) +{ + l->locked = 0; +} + +/** + * spin_lock_held - determines if the lock is held + * @l: the spin lock + * + * Returns true if the lock is held. 
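[Illustrative sketch, not part of this patch: round-tripping a pointer through a shm_region with ptr_to_shmptr()/shmptr_to_ptr(). struct demo_msg is invented for the example, and the region is assumed to have been mapped elsewhere.]

struct demo_msg {
    int id;
    char payload[56];
};

static struct demo_msg *demo_roundtrip(struct shm_region *r, struct demo_msg *m)
{
    /* offset form that stays valid in any process mapping the same segment */
    shmptr_t s = ptr_to_shmptr(r, m, sizeof(*m));

    /* back to a local pointer; NULL if the offset falls outside the region */
    return (struct demo_msg *)shmptr_to_ptr(r, s, sizeof(*m));
}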
+ */ +static inline bool spin_lock_held(const spinlock_t *l) +{ + return l->locked != 0; +} + +/** + * assert_spin_lock_held - asserts that the lock is currently held + * @l: the spin lock + */ +#define assert_spin_lock_held(l) assert(spin_lock_held(l)) + +/** + * spin_lock - takes a spin lock + * @l: the spin lock + */ +static inline void spin_lock(spinlock_t *l) +{ + while (__sync_lock_test_and_set(&l->locked, 1)) { + while (l->locked) cpu_relax(); + } +} + +/** + * spin_try_lock- takes a spin lock, but only if it is available + * @l: the spin lock + * + * Returns 1 if successful, otherwise 0 + */ +static inline bool spin_try_lock(spinlock_t *l) +{ + if (!__sync_lock_test_and_set(&l->locked, 1)) + return true; + return false; +} + +/** + * spin_unlock - releases a spin lock + * @l: the spin lock + */ +static inline void spin_unlock(spinlock_t *l) +{ + assert_spin_lock_held(l); + __sync_lock_release(&l->locked); +} diff --git a/utils/include/utils/syscalls.h b/utils/include/utils/syscalls.h new file mode 100644 index 0000000..3dfc514 --- /dev/null +++ b/utils/include/utils/syscalls.h @@ -0,0 +1,9 @@ +#pragma once + +#include +#include + +static inline pid_t _gettid() +{ + return syscall(SYS_gettid); +} diff --git a/utils/include/utils/time.h b/utils/include/utils/time.h new file mode 100644 index 0000000..979f7c1 --- /dev/null +++ b/utils/include/utils/time.h @@ -0,0 +1,66 @@ +#ifndef _TIME_H_ +#define _TIME_H_ + +#include +#include +#include + +typedef uint64_t __sec; +typedef uint64_t __nsec; +typedef uint64_t __usec; + +#define NSEC_PER_SEC (1000000000UL) +#define NSEC_PER_MSEC (1000000UL) +#define NSEC_PER_USEC (1000UL) +#define USEC_PER_SEC (1000000UL) +#define USEC_PER_MSEC (1000UL) +#define MSEC_PER_SEC (1000UL) + +#define time_nsec_to_sec(ns) ((ns) / NSEC_PER_SEC) +#define time_nsec_to_msec(ns) ((ns) / NSEC_PER_MSEC) +#define time_nsec_to_usec(ns) ((ns) / NSEC_PER_USEC) + +#define time_sec_to_nsec(sec) ((sec) * NSEC_PER_SEC) +#define time_msec_to_nsec(msec) ((msec) * NSEC_PER_MSEC) +#define time_usec_to_nsec(usec) ((usec) * NSEC_PER_USEC) + +extern __usec g_boot_time_us; + +static inline __nsec now_ns() +{ + struct timespec ts; + clock_gettime(CLOCK_REALTIME, &ts); + return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec; +} + +static inline __usec now_us() +{ + struct timespec ts; + clock_gettime(CLOCK_REALTIME, &ts); + return ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC; +} + +/// Return monotonic time in microseconds since system boot. 
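[Illustrative sketch, not part of this patch: typical use of the spinlock primitives above. demo_lock and demo_counter are invented names.]

static DEFINE_SPINLOCK(demo_lock);
static int demo_counter;

static void demo_increment(void)
{
    spin_lock(&demo_lock);
    demo_counter++;
    spin_unlock(&demo_lock);
}

static bool demo_try_increment(void)
{
    /* non-blocking variant: gives up instead of spinning */
    if (!spin_try_lock(&demo_lock))
        return false;
    demo_counter++;
    spin_unlock(&demo_lock);
    return true;
}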
+static inline __usec monotonic_us() +{ + return now_us() - g_boot_time_us; +} + +static inline uint64_t now_tsc() +{ + uint32_t lo, hi; + asm volatile("rdtscp" : "=a"(lo), "=d"(hi)::"rcx"); + return ((uint64_t)hi << 32) | lo; +} + +static inline void spin_until(__nsec deadline) +{ + while (now_ns() < deadline); +} + +static inline void spin(__nsec duration) +{ + spin_until(now_ns() + duration); +} + +#endif // _TIME_H_ diff --git a/utils/include/utils/types.h b/utils/include/utils/types.h new file mode 100644 index 0000000..088765f --- /dev/null +++ b/utils/include/utils/types.h @@ -0,0 +1,11 @@ +/* + * types.h - primitive type definitions + */ + +#pragma once + +#include +#include +#include + +struct empty_t {}; diff --git a/utils/include/utils/uintr.h b/utils/include/utils/uintr.h new file mode 100644 index 0000000..f02a4a9 --- /dev/null +++ b/utils/include/utils/uintr.h @@ -0,0 +1,43 @@ +#pragma once + +#ifdef SKYLOFT_UINTR + +#include +#include + +#define uintr_register_handler(handler, flags) syscall(__NR_uintr_register_handler, handler, flags) +#define uintr_unregister_handler(flags) syscall(__NR_uintr_unregister_handler, flags) +#define uintr_vector_fd(vector, flags) syscall(__NR_uintr_vector_fd, vector, flags) +#define uintr_register_sender(fd, flags) syscall(__NR_uintr_register_sender, fd, flags) +#define uintr_unregister_sender(ipi_idx, flags) \ + syscall(__NR_uintr_unregister_sender, ipi_idx, flags) +#define uintr_wait(usec, flags) syscall(__NR_uintr_wait, usec, flags) +#define uintr_register_self(vector, flags) syscall(__NR_uintr_register_self, vector, flags) +#define uintr_alt_stack(sp, size, flags) syscall(__NR_uintr_alt_stack, sp, size, flags) +#define uintr_ipi_fd(flags) syscall(__NR_uintr_ipi_fd, flags) + +#define __NR_uintr_register_handler 471 +#define __NR_uintr_unregister_handler 472 +#define __NR_uintr_vector_fd 473 +#define __NR_uintr_register_sender 474 +#define __NR_uintr_unregister_sender 475 +#define __NR_uintr_wait 476 +#define __NR_uintr_register_self 477 +#define __NR_uintr_alt_stack 478 +#define __NR_uintr_ipi_fd 479 + +#define local_irq_save(flags) \ + do { \ + flags = _testui(); \ + _clui(); \ + } while (0) +#define local_irq_restore(flags) \ + do { \ + if (flags) \ + _stui(); \ + } while (0) + +#else +#define local_irq_save(flags) +#define local_irq_restore(flags) +#endif diff --git a/utils/list.c b/utils/list.c new file mode 100644 index 0000000..fcc9cf5 --- /dev/null +++ b/utils/list.c @@ -0,0 +1,40 @@ +/* Licensed under BSD-MIT - see LICENSE file for details */ + +#include +#include + +#include +#include + +static void *corrupt(const char *abortstr, const struct list_node *head, + const struct list_node *node, unsigned int count) +{ + if (abortstr) { + panic("%s: prev corrupt in node %p (%u) of %p\n", abortstr, node, count, head); + } + return NULL; +} + +struct list_node *list_check_node(const struct list_node *node, const char *abortstr) +{ + const struct list_node *p, *n; + int count = 0; + + for (p = node, n = node->next; n != node; p = n, n = n->next) { + count++; + if (n->prev != p) + return corrupt(abortstr, node, n, count); + } + /* Check prev on head node. 
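[Illustrative sketch, not part of this patch: measuring an interval with the time helpers above. The 500 us busy-wait duration is arbitrary.]

static __usec demo_measure_spin(void)
{
    __usec start = now_us();

    /* busy-wait for roughly 500 microseconds */
    spin(time_usec_to_nsec(500));

    /* elapsed wall-clock microseconds */
    return now_us() - start;
}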
*/ + if (node->prev != p) + return corrupt(abortstr, node, node, 0); + + return (struct list_node *)node; +} + +struct list_head *list_check(const struct list_head *h, const char *abortstr) +{ + if (!list_check_node(&h->n, abortstr)) + return NULL; + return (struct list_head *)h; +} diff --git a/utils/log.c b/utils/log.c new file mode 100644 index 0000000..93f646a --- /dev/null +++ b/utils/log.c @@ -0,0 +1,85 @@ +/* + * log.c - the logging system + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define MAX_LOG_LEN 4096 + +/* stored here to avoid pushing too much on the stack */ +static __thread char buf[MAX_LOG_LEN]; + +extern __thread int g_logic_cpu_id; + +static inline int log_color(int level) +{ + switch (level) { + case LOG_CRIT: + return 31; // red + case LOG_ERR: + return 31; // red + case LOG_WARN: + return 33; // yellow + case LOG_NOTICE: + return 35; // magenta + case LOG_INFO: + return 32; // green + case LOG_DEBUG: + return 36; // cyan + default: + return 0; + } +} + +void logk(int level, const char *fmt, ...) +{ + int flags; + va_list ptr; + + local_irq_save(flags); + + int cpu = g_logic_cpu_id; + __usec us = monotonic_us(); + sprintf(buf, "\x1b[37m[%ld.%06d %d] \x1b[%dm", us / USEC_PER_SEC, (int)(us % USEC_PER_SEC), cpu, + log_color(level)); + + off_t off = strlen(buf); + va_start(ptr, fmt); + vsnprintf(buf + off, MAX_LOG_LEN - off, fmt, ptr); + va_end(ptr); + printf("%s\x1b[m\n", buf); + + if (level <= LOG_ERR) + fflush(stdout); + + local_irq_restore(flags); +} + +#define MAX_CALL_DEPTH 256 +void logk_backtrace(void) +{ + void *buf[MAX_CALL_DEPTH]; + const int calls = backtrace(buf, ARRAY_SIZE(buf)); + backtrace_symbols_fd(buf, calls, 1); +} + +void logk_bug(bool fatal, const char *expr, const char *file, int line, const char *func) +{ + logk(fatal ? LOG_CRIT : LOG_WARN, "%s: %s:%d ASSERTION '%s' FAILED IN '%s'", + fatal ? "FATAL" : "WARN", file, line, expr, func); + + if (fatal) { + logk_backtrace(); + exit(EXIT_FAILURE); + } +} diff --git a/utils/lrpc.c b/utils/lrpc.c new file mode 100644 index 0000000..20f2463 --- /dev/null +++ b/utils/lrpc.c @@ -0,0 +1,27 @@ +/* + * lrpc.c - shared memory communication channels + */ + +#include +#include + +#include + +/** + * lrpc_init - initializes a shared memory channel + * @chan: the channel struct to initialize + * @size: the number of message elements in the buffer + * + * returns 0 if successful, or -EINVAL if @size is not a power of two. +// */ +int lrpc_init(struct lrpc_chan *chan, struct lrpc_msg *tbl, unsigned int size) +{ + if (!is_power_of_two(size)) + return -EINVAL; + + memset(chan, 0, sizeof(*chan)); + chan->out.tbl = chan->in.tbl = tbl; + chan->out.size = chan->in.size = size; + chan->out.mask = chan->in.mask = size - 1; + return 0; +} diff --git a/utils/rbtree.c b/utils/rbtree.c new file mode 100644 index 0000000..16108d6 --- /dev/null +++ b/utils/rbtree.c @@ -0,0 +1,595 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + (C) 2002 David Woodhouse + (C) 2012 Michel Lespinasse + + + linux/lib/rbtree.c +*/ + +#include + +/* + * red-black trees properties: https://en.wikipedia.org/wiki/Rbtree + * + * 1) A node is either red or black + * 2) The root is black + * 3) All leaves (NULL) are black + * 4) Both children of every red node are black + * 5) Every simple path from root to leaves contains the same number + * of black nodes. 
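[Illustrative sketch, not part of this patch: initializing a channel with lrpc_init(). struct lrpc_chan and struct lrpc_msg come from the corresponding lrpc header, which is not shown here; the table size of 128 is an arbitrary power of two.]

#define DEMO_CHAN_SIZE 128                    /* must be a power of two */

static struct lrpc_msg demo_tbl[DEMO_CHAN_SIZE];
static struct lrpc_chan demo_chan;

static int demo_chan_setup(void)
{
    /* returns -EINVAL if the size is not a power of two */
    return lrpc_init(&demo_chan, demo_tbl, DEMO_CHAN_SIZE);
}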
+ * + * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two + * consecutive red nodes in a path and every red node is therefore followed by + * a black. So if B is the number of black nodes on every simple path (as per + * 5), then the longest possible path due to 4 is 2B. + * + * We shall indicate color with case, where black nodes are uppercase and red + * nodes will be lowercase. Unknown color nodes shall be drawn as red within + * parentheses and have some accompanying text comment. + */ + +/* + * Notes on lockless lookups: + * + * All stores to the tree structure (rb_left and rb_right) must be done using + * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the + * tree structure as seen in program order. + * + * These two requirements will allow lockless iteration of the tree -- not + * correct iteration mind you, tree rotations are not atomic so a lookup might + * miss entire subtrees. + * + * But they do guarantee that any such traversal will only see valid elements + * and that it will indeed complete -- does not get stuck in a loop. + * + * It also guarantees that if the lookup returns an element it is the 'correct' + * one. But not returning an element does _NOT_ mean it's not present. + * + * NOTE: + * + * Stores to __rb_parent_color are not important for simple lookups so those + * are left undone as of now. Nor did I check for loops involving parent + * pointers. + */ + +static inline void rb_set_black(struct rb_node *rb) +{ + rb->__rb_parent_color += RB_BLACK; +} + +static inline struct rb_node *rb_red_parent(struct rb_node *red) +{ + return (struct rb_node *)red->__rb_parent_color; +} + +/* + * Helper function for rotations: + * - old's parent and color get assigned to new + * - old gets assigned new as a parent and 'color' as a color. + */ +static inline void __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new, + struct rb_root *root, int color) +{ + struct rb_node *parent = rb_parent(old); + new->__rb_parent_color = old->__rb_parent_color; + rb_set_parent_color(old, new, color); + __rb_change_child(old, new, parent, root); +} + +static __always_inline void __rb_insert(struct rb_node *node, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, + struct rb_node *new)) +{ + struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; + + while (true) { + /* + * Loop invariant: node is red. + */ + if (unlikely(!parent)) { + /* + * The inserted node is root. Either this is the + * first node, or we recursed at Case 1 below and + * are no longer violating 4). + */ + rb_set_parent_color(node, NULL, RB_BLACK); + break; + } + + /* + * If there is a black parent, we are done. + * Otherwise, take some corrective action as, + * per 4), we don't want a red root or two + * consecutive red nodes. + */ + if (rb_is_black(parent)) + break; + + gparent = rb_red_parent(parent); + + tmp = gparent->rb_right; + if (parent != tmp) { /* parent == gparent->rb_left */ + if (tmp && rb_is_red(tmp)) { + /* + * Case 1 - node's uncle is red (color flips). + * + * G g + * / \ / \ + * p u --> P U + * / / + * n n + * + * However, since g's parent might be red, and + * 4) does not allow this, we need to recurse + * at g. 
+ */ + rb_set_parent_color(tmp, gparent, RB_BLACK); + rb_set_parent_color(parent, gparent, RB_BLACK); + node = gparent; + parent = rb_parent(node); + rb_set_parent_color(node, parent, RB_RED); + continue; + } + + tmp = parent->rb_right; + if (node == tmp) { + /* + * Case 2 - node's uncle is black and node is + * the parent's right child (left rotate at parent). + * + * G G + * / \ / \ + * p U --> n U + * \ / + * n p + * + * This still leaves us in violation of 4), the + * continuation into Case 3 will fix that. + */ + tmp = node->rb_left; + WRITE_ONCE(parent->rb_right, tmp); + WRITE_ONCE(node->rb_left, parent); + if (tmp) + rb_set_parent_color(tmp, parent, RB_BLACK); + rb_set_parent_color(parent, node, RB_RED); + augment_rotate(parent, node); + parent = node; + tmp = node->rb_right; + } + + /* + * Case 3 - node's uncle is black and node is + * the parent's left child (right rotate at gparent). + * + * G P + * / \ / \ + * p U --> n g + * / \ + * n U + */ + WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */ + WRITE_ONCE(parent->rb_right, gparent); + if (tmp) + rb_set_parent_color(tmp, gparent, RB_BLACK); + __rb_rotate_set_parents(gparent, parent, root, RB_RED); + augment_rotate(gparent, parent); + break; + } else { + tmp = gparent->rb_left; + if (tmp && rb_is_red(tmp)) { + /* Case 1 - color flips */ + rb_set_parent_color(tmp, gparent, RB_BLACK); + rb_set_parent_color(parent, gparent, RB_BLACK); + node = gparent; + parent = rb_parent(node); + rb_set_parent_color(node, parent, RB_RED); + continue; + } + + tmp = parent->rb_left; + if (node == tmp) { + /* Case 2 - right rotate at parent */ + tmp = node->rb_right; + WRITE_ONCE(parent->rb_left, tmp); + WRITE_ONCE(node->rb_right, parent); + if (tmp) + rb_set_parent_color(tmp, parent, RB_BLACK); + rb_set_parent_color(parent, node, RB_RED); + augment_rotate(parent, node); + parent = node; + tmp = node->rb_left; + } + + /* Case 3 - left rotate at gparent */ + WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */ + WRITE_ONCE(parent->rb_left, gparent); + if (tmp) + rb_set_parent_color(tmp, gparent, RB_BLACK); + __rb_rotate_set_parents(gparent, parent, root, RB_RED); + augment_rotate(gparent, parent); + break; + } + } +} + +/* + * Inline version for rb_erase() use - we want to be able to inline + * and eliminate the dummy_rotate callback there + */ +static __always_inline void ____rb_erase_color(struct rb_node *parent, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, + struct rb_node *new)) +{ + struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; + + while (true) { + /* + * Loop invariants: + * - node is black (or NULL on first iteration) + * - node is not the root (parent is not NULL) + * - All leaf paths going through parent and node have a + * black node count that is 1 lower than other leaf paths. 
+ */ + sibling = parent->rb_right; + if (node != sibling) { /* node == parent->rb_left */ + if (rb_is_red(sibling)) { + /* + * Case 1 - left rotate at parent + * + * P S + * / \ / \ + * N s --> p Sr + * / \ / \ + * Sl Sr N Sl + */ + tmp1 = sibling->rb_left; + WRITE_ONCE(parent->rb_right, tmp1); + WRITE_ONCE(sibling->rb_left, parent); + rb_set_parent_color(tmp1, parent, RB_BLACK); + __rb_rotate_set_parents(parent, sibling, root, RB_RED); + augment_rotate(parent, sibling); + sibling = tmp1; + } + tmp1 = sibling->rb_right; + if (!tmp1 || rb_is_black(tmp1)) { + tmp2 = sibling->rb_left; + if (!tmp2 || rb_is_black(tmp2)) { + /* + * Case 2 - sibling color flip + * (p could be either color here) + * + * (p) (p) + * / \ / \ + * N S --> N s + * / \ / \ + * Sl Sr Sl Sr + * + * This leaves us violating 5) which + * can be fixed by flipping p to black + * if it was red, or by recursing at p. + * p is red when coming from Case 1. + */ + rb_set_parent_color(sibling, parent, RB_RED); + if (rb_is_red(parent)) + rb_set_black(parent); + else { + node = parent; + parent = rb_parent(node); + if (parent) + continue; + } + break; + } + /* + * Case 3 - right rotate at sibling + * (p could be either color here) + * + * (p) (p) + * / \ / \ + * N S --> N sl + * / \ \ + * sl Sr S + * \ + * Sr + * + * Note: p might be red, and then both + * p and sl are red after rotation(which + * breaks property 4). This is fixed in + * Case 4 (in __rb_rotate_set_parents() + * which set sl the color of p + * and set p RB_BLACK) + * + * (p) (sl) + * / \ / \ + * N sl --> P S + * \ / \ + * S N Sr + * \ + * Sr + */ + tmp1 = tmp2->rb_right; + WRITE_ONCE(sibling->rb_left, tmp1); + WRITE_ONCE(tmp2->rb_right, sibling); + WRITE_ONCE(parent->rb_right, tmp2); + if (tmp1) + rb_set_parent_color(tmp1, sibling, RB_BLACK); + augment_rotate(sibling, tmp2); + tmp1 = sibling; + sibling = tmp2; + } + /* + * Case 4 - left rotate at parent + color flips + * (p and sl could be either color here. 
+ * After rotation, p becomes black, s acquires + * p's color, and sl keeps its color) + * + * (p) (s) + * / \ / \ + * N S --> P Sr + * / \ / \ + * (sl) sr N (sl) + */ + tmp2 = sibling->rb_left; + WRITE_ONCE(parent->rb_right, tmp2); + WRITE_ONCE(sibling->rb_left, parent); + rb_set_parent_color(tmp1, sibling, RB_BLACK); + if (tmp2) + rb_set_parent(tmp2, parent); + __rb_rotate_set_parents(parent, sibling, root, RB_BLACK); + augment_rotate(parent, sibling); + break; + } else { + sibling = parent->rb_left; + if (rb_is_red(sibling)) { + /* Case 1 - right rotate at parent */ + tmp1 = sibling->rb_right; + WRITE_ONCE(parent->rb_left, tmp1); + WRITE_ONCE(sibling->rb_right, parent); + rb_set_parent_color(tmp1, parent, RB_BLACK); + __rb_rotate_set_parents(parent, sibling, root, RB_RED); + augment_rotate(parent, sibling); + sibling = tmp1; + } + tmp1 = sibling->rb_left; + if (!tmp1 || rb_is_black(tmp1)) { + tmp2 = sibling->rb_right; + if (!tmp2 || rb_is_black(tmp2)) { + /* Case 2 - sibling color flip */ + rb_set_parent_color(sibling, parent, RB_RED); + if (rb_is_red(parent)) + rb_set_black(parent); + else { + node = parent; + parent = rb_parent(node); + if (parent) + continue; + } + break; + } + /* Case 3 - left rotate at sibling */ + tmp1 = tmp2->rb_left; + WRITE_ONCE(sibling->rb_right, tmp1); + WRITE_ONCE(tmp2->rb_left, sibling); + WRITE_ONCE(parent->rb_left, tmp2); + if (tmp1) + rb_set_parent_color(tmp1, sibling, RB_BLACK); + augment_rotate(sibling, tmp2); + tmp1 = sibling; + sibling = tmp2; + } + /* Case 4 - right rotate at parent + color flips */ + tmp2 = sibling->rb_right; + WRITE_ONCE(parent->rb_left, tmp2); + WRITE_ONCE(sibling->rb_right, parent); + rb_set_parent_color(tmp1, sibling, RB_BLACK); + if (tmp2) + rb_set_parent(tmp2, parent); + __rb_rotate_set_parents(parent, sibling, root, RB_BLACK); + augment_rotate(parent, sibling); + break; + } + } +} + +/* Non-inline version for rb_erase_augmented() use */ +void __rb_erase_color(struct rb_node *parent, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) +{ + ____rb_erase_color(parent, root, augment_rotate); +} + +/* + * Non-augmented rbtree manipulation functions. + * + * We use dummy augmented callbacks here, and have the compiler optimize them + * out of the rb_insert_color() and rb_erase() function definitions. + */ + +static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {} +static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {} +static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {} + +static const struct rb_augment_callbacks dummy_callbacks = { + .propagate = dummy_propagate, .copy = dummy_copy, .rotate = dummy_rotate}; + +void rb_insert_color(struct rb_node *node, struct rb_root *root) +{ + __rb_insert(node, root, dummy_rotate); +} + +void rb_erase(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *rebalance; + rebalance = __rb_erase_augmented(node, root, &dummy_callbacks); + if (rebalance) + ____rb_erase_color(rebalance, root, dummy_rotate); +} + +/* + * Augmented rbtree manipulation functions. + * + * This instantiates the same __always_inline functions as in the non-augmented + * case, but this time with user-defined callbacks. + */ + +void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) +{ + __rb_insert(node, root, augment_rotate); +} + +/* + * This function returns the first node (in sort order) of the tree. 
+ */ +struct rb_node *rb_first(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_left) n = n->rb_left; + return n; +} + +struct rb_node *rb_last(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_right) n = n->rb_right; + return n; +} + +struct rb_node *rb_next(const struct rb_node *node) +{ + struct rb_node *parent; + + if (RB_EMPTY_NODE(node)) + return NULL; + + /* + * If we have a right-hand child, go down and then left as far + * as we can. + */ + if (node->rb_right) { + node = node->rb_right; + while (node->rb_left) node = node->rb_left; + return (struct rb_node *)node; + } + + /* + * No right-hand children. Everything down and left is smaller than us, + * so any 'next' node must be in the general direction of our parent. + * Go up the tree; any time the ancestor is a right-hand child of its + * parent, keep going up. First time it's a left-hand child of its + * parent, said parent is our 'next' node. + */ + while ((parent = rb_parent(node)) && node == parent->rb_right) node = parent; + + return parent; +} + +struct rb_node *rb_prev(const struct rb_node *node) +{ + struct rb_node *parent; + + if (RB_EMPTY_NODE(node)) + return NULL; + + /* + * If we have a left-hand child, go down and then right as far + * as we can. + */ + if (node->rb_left) { + node = node->rb_left; + while (node->rb_right) node = node->rb_right; + return (struct rb_node *)node; + } + + /* + * No left-hand children. Go up till we find an ancestor which + * is a right-hand child of its parent. + */ + while ((parent = rb_parent(node)) && node == parent->rb_left) node = parent; + + return parent; +} + +void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root) +{ + struct rb_node *parent = rb_parent(victim); + + /* Copy the pointers/colour from the victim to the replacement */ + *new = *victim; + + /* Set the surrounding nodes to point to the replacement */ + if (victim->rb_left) + rb_set_parent(victim->rb_left, new); + if (victim->rb_right) + rb_set_parent(victim->rb_right, new); + __rb_change_child(victim, new, parent, root); +} + +// void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, struct rb_root *root) +// { +// struct rb_node *parent = rb_parent(victim); + +// /* Copy the pointers/colour from the victim to the replacement */ +// *new = *victim; + +// /* Set the surrounding nodes to point to the replacement */ +// if (victim->rb_left) +// rb_set_parent(victim->rb_left, new); +// if (victim->rb_right) +// rb_set_parent(victim->rb_right, new); + +// /* Set the parent's pointer to the new node last after an RCU barrier +// * so that the pointers onwards are seen to be set correctly when doing +// * an RCU walk over the tree. 
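[Illustrative sketch, not part of this patch: in-order traversal with rb_first()/rb_next(), reusing the hypothetical struct demo_node from the earlier sketch.]

static int demo_sum_keys(struct rb_root *root)
{
    struct rb_node *n;
    int sum = 0;

    /* visits nodes in ascending key order */
    for (n = rb_first(root); n; n = rb_next(n))
        sum += rb_entry(n, struct demo_node, rb)->key;
    return sum;
}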
+// */ +// __rb_change_child_rcu(victim, new, parent, root); +// } + +static struct rb_node *rb_left_deepest_node(const struct rb_node *node) +{ + for (;;) { + if (node->rb_left) + node = node->rb_left; + else if (node->rb_right) + node = node->rb_right; + else + return (struct rb_node *)node; + } +} + +struct rb_node *rb_next_postorder(const struct rb_node *node) +{ + const struct rb_node *parent; + if (!node) + return NULL; + parent = rb_parent(node); + + /* If we're sitting on node, we've already seen our children */ + if (parent && node == parent->rb_left && parent->rb_right) { + /* If we are the parent's left node, go to the parent's right + * node then all the way down to the left */ + return rb_left_deepest_node(parent->rb_right); + } else + /* Otherwise we are the parent's right node, and the parent + * should be next */ + return (struct rb_node *)parent; +} + +struct rb_node *rb_first_postorder(const struct rb_root *root) +{ + if (!root->rb_node) + return NULL; + + return rb_left_deepest_node(root->rb_node); +} \ No newline at end of file
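
[Illustrative sketch, not part of this patch: tearing a whole tree down with the post-order iterators defined above. Post-order visits children before their parent, so the successor can be fetched before the current node is freed. struct demo_node is the hypothetical type from the earlier sketches, and free() assumes each node was heap-allocated as its own object.]

static void demo_destroy_all(struct rb_root *root)
{
    struct rb_node *n = rb_first_postorder(root);

    while (n) {
        /* grab the successor first: it never lives inside the node being freed */
        struct rb_node *next = rb_next_postorder(n);

        free(rb_entry(n, struct demo_node, rb));
        n = next;
    }
    root->rb_node = NULL;
}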