diff --git a/README.md b/README.md
index d0d0f33..631a9d2 100644
--- a/README.md
+++ b/README.md
@@ -168,6 +168,8 @@ The key components are
 containers also. See [bpftune-netns (8)](./docs/bpftune-netns.rst)
 - UDP buffer tuner: auto-tune buffers relating to UDP. See [bpftune-udp-buffer (8)](./docs/bpftune-udp-buffer.rst)
+- Gaming tuner: detects sustained, latency-sensitive UDP traffic and applies
+  per-profile network tweaks for gaming workloads.
 
 ## Code organization
 
diff --git a/docs/bpftune-gaming.rst b/docs/bpftune-gaming.rst
new file mode 100644
index 0000000..f0c41bf
--- /dev/null
+++ b/docs/bpftune-gaming.rst
@@ -0,0 +1,26 @@
+Gaming tuner profile guide
+==========================
+
+The gaming tuner watches outgoing UDP traffic (≤1500 byte payloads) and
+counts packets-per-second. If traffic stays above ~25 PPS for at least two
+sampling windows with stable variance, the tuner classifies the session and
+applies one of three profiles:
+
+- **CASUAL** – Keeps socket defaults at 262 KB, backlog at 5 K, budgets modest,
+  and busy-read/busy-poll around 25 µs. Suitable for lighter multiplayer or
+  machines where background work still matters.
+- **COMPETITIVE** – Raises rmem/wmem limits to 16 MB, bumps NAPI budgets, and
+  shortens interrupt coalescing for twitch shooters or MOBAs on mainstream
+  hardware.
+- **INTENSE** – Pushes UDP/TCP caps to ~33 MB, maximizes NAPI budgets, and
+  keeps NIC interrupts as immediate as the driver allows. Use for VR streaming,
+  LAN events, or when squeezing the absolute lowest latency from a well-sized
+  system.
+
+When traffic quiets for ~10 s the tuner rolls every sysctl back to its cached
+baseline and logs the restoration. If a system feels resource-constrained,
+drop down one profile tier or adjust the busy poll/read tunables to ~15–25 µs
+before rebuilding the gaming tuner shared object.
+
+The detection thresholds live in ``src/gaming_tuner.h``
+(``GAMING_TUNER_UDP_MAX_SIZE`` and ``GAMING_TUNER_UDP_MIN_PPS``). They can be
+tuned and rebuilt if a title has unusual packet sizing or pacing requirements.
diff --git a/include/bpftune/bpftune.h b/include/bpftune/bpftune.h
index eab4498..f79f6d5 100644
--- a/include/bpftune/bpftune.h
+++ b/include/bpftune/bpftune.h
@@ -80,12 +80,23 @@ enum bpftune_state {
 	BPFTUNE_GONE,		/* resource gone */
 };
 
+enum bpftunable_scenario_flags {
+	BPFTUNABLE_SCENARIO_QUIET = 0x1,
+};
+
 struct bpftunable_scenario {
 	unsigned int id;
 	const char *name;
 	const char *description;
+	unsigned int flags;
 };
 
+#define BPFTUNABLE_SCENARIO(_id, _name, _description)	\
+	{ (_id), (_name), (_description), 0 }
+
+#define BPFTUNABLE_SCENARIO_FLAGS(_id, _name, _description, _flags)	\
+	{ (_id), (_name), (_description), (_flags) }
+
 /* some tunables are defined as triples */
 #define BPFTUNE_MAX_VALUES 3
 
diff --git a/src/Makefile b/src/Makefile
index 8d9205d..5ddf4a4 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -100,7 +100,7 @@ endif
 
 TUNERS = tcp_buffer_tuner route_table_tuner neigh_table_tuner sysctl_tuner \
 	 tcp_conn_tuner netns_tuner net_buffer_tuner ip_frag_tuner \
-	 udp_buffer_tuner
+	 udp_buffer_tuner gaming_tuner
 
 TUNER_OBJS = $(patsubst %,%.o,$(TUNERS))
 TUNER_SRCS = $(patsubst %,%.c,$(TUNERS))
diff --git a/src/gaming_tuner.bpf.c b/src/gaming_tuner.bpf.c
new file mode 100644
index 0000000..e92f95d
--- /dev/null
+++ b/src/gaming_tuner.bpf.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+/*
+ * Gaming Performance Tuner - eBPF detector
+ * Identifies competitive gaming traffic patterns and notifies userspace.
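+ *
+ * Detection overview (see gaming_tuner.h for the constants): outgoing UDP
+ * payloads of at most GAMING_TUNER_UDP_MAX_SIZE bytes are counted in
+ * one-second windows; once the smoothed rate holds at or above
+ * GAMING_TUNER_UDP_MIN_PPS with bounded variance over consecutive windows,
+ * a GAMING_SCENARIO_DETECTED event is emitted, and sustained quiet later
+ * yields GAMING_SCENARIO_ENDED.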
+ */
+
+#include <bpftune/bpftune.bpf.h>
+#include "gaming_tuner.h"
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_endian.h>
+
+#define GAMING_INTENSITY_DWELL_NS	(5ULL * 1000000000ULL)
+
+struct gaming_stats {
+	__u64 udp_packets;
+	__u64 tracked_udp_packets;
+	__u64 last_activity;
+	__u32 current_pps;
+	__u32 is_gaming;
+	__u32 game_intensity;
+	__u32 steady_periods;
+	__u32 calm_periods;
+	__u32 intensity_candidate;
+	__u32 intensity_confidence;
+	__u32 reported_intensity;
+	__u32 pps_history[GAMING_TUNER_PPS_HISTORY];
+	__u32 pps_history_idx;
+	__u64 last_pps_update;
+	__u32 pps_variance;
+#ifndef BPFTUNE_LEGACY
+	struct bpf_timer timeout_timer;
+#endif
+	__u32 active_ifindex;
+	char current_comm[GAMING_TUNER_COMM_LEN];
+	__u64 last_intensity_change;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__type(key, __u32);
+	__type(value, struct gaming_stats);
+	__uint(max_entries, 1);
+} stats_map SEC(".maps");
+
+static __always_inline __u32 calculate_smooth_pps(struct gaming_stats *stats)
+{
+	__u32 sum = 0;
+	__u32 count = 0;
+
+	for (int i = 0; i < (int)GAMING_TUNER_PPS_HISTORY; i++) {
+		if (stats->pps_history[i]) {
+			sum += stats->pps_history[i];
+			count++;
+		}
+	}
+
+	return count ? sum / count : 0;
+}
+
+static __always_inline __u32 calculate_pps_variance(struct gaming_stats *stats, __u32 avg)
+{
+	__u32 variance_sum = 0;
+	__u32 count = 0;
+
+	for (int i = 0; i < (int)GAMING_TUNER_PPS_HISTORY; i++) {
+		__u32 value = stats->pps_history[i];
+
+		if (!value)
+			continue;
+
+		if (value > avg)
+			variance_sum += value - avg;
+		else
+			variance_sum += avg - value;
+		count++;
+	}
+
+	return count ? variance_sum / count : 0;
+}
+
+static __always_inline void notify_userspace(__u32 scenario, __u32 intensity,
+					     __u32 pps, __u32 variance,
+					     __u32 ifindex,
+					     const char *comm)
+{
+	struct bpftune_event event = {};
+	struct gaming_event_data *payload;
+
+	event.pid = (__u32)(bpf_get_current_pid_tgid() >> 32);
+	event.tuner_id = tuner_id;
+	event.scenario_id = scenario;
+	payload = (struct gaming_event_data *)event.raw_data;
+	payload->intensity = intensity;
+	payload->pps = pps;
+	payload->variance = variance;
+	payload->ifindex = ifindex;
+
+	if (comm) {
+		for (int i = 0; i < GAMING_TUNER_COMM_LEN; i++) {
+			payload->comm[i] = comm[i];
+			if (!comm[i])
+				break;
+		}
+	}
+
+	bpf_ringbuf_output(&ring_buffer_map, &event, sizeof(event), 0);
+}
+
+#define GAMING_PPS_WINDOW_NS 1000000000ULL
+
+#ifndef BPFTUNE_LEGACY
+static __always_inline void gaming_timer_schedule(struct gaming_stats *stats, __u64 delay)
+{
+	if (!stats)
+		return;
+
+	bpf_timer_start(&stats->timeout_timer, delay, 0);
+}
+
+static int gaming_timeout_cb(void *map, int *key, struct gaming_stats *stats)
+{
+	__u64 now;
+	__u64 inactivity;
+
+	if (!stats || !stats->is_gaming)
+		return 0;
+
+	now = bpf_ktime_get_ns();
+	if (!stats->last_activity) {
+		gaming_timer_schedule(stats, GAMING_TUNER_TIMEOUT_NS);
+		return 0;
+	}
+
+	inactivity = now - stats->last_activity;
+	if (inactivity >= GAMING_TUNER_TIMEOUT_NS) {
+		notify_userspace(GAMING_SCENARIO_ENDED,
+				 stats->game_intensity,
+				 stats->current_pps,
+				 stats->pps_variance,
+				 stats->active_ifindex,
+				 stats->current_comm);
+		stats->is_gaming = 0;
+		stats->steady_periods = 0;
+		stats->game_intensity = 0;
+		stats->current_pps = 0;
+		stats->pps_variance = 0;
+		stats->current_comm[0] = '\0';
+		stats->last_intensity_change = 0;
+		return 0;
+	}
+
+	gaming_timer_schedule(stats, GAMING_TUNER_TIMEOUT_NS - inactivity);
+	return 0;
+}
+#endif
+
+static __always_inline void record_activity(struct gaming_stats *stats, __u64 now)
+{
+	if (!stats)
+		return;
+
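+	/* Stamp the most recent traffic time; while a session is active,
+	 * push the inactivity timer another GAMING_TUNER_TIMEOUT_NS into
+	 * the future so it only fires after a genuinely idle gap.
+	 */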
stats->last_activity = now; +#ifndef BPFTUNE_LEGACY + if (stats->is_gaming) + gaming_timer_schedule(stats, GAMING_TUNER_TIMEOUT_NS); +#endif +} + +static __always_inline void handle_pps_window(struct gaming_stats *stats, __u64 now); + +static __always_inline __u16 gaming_sock_dport(const struct sock *sk) +{ + __u16 dport = 0; + + if (!sk) + return 0; + + dport = BPF_CORE_READ(sk, __sk_common.skc_dport); + return bpf_ntohs(dport); +} + +static __always_inline void gaming_count_packet(struct gaming_stats *stats, + __u64 now, __u64 len, + __u16 dport) +{ + if (!stats) + return; + + if (len <= GAMING_TUNER_UDP_MAX_SIZE) { + stats->tracked_udp_packets++; + + if (now - stats->last_pps_update >= GAMING_PPS_WINDOW_NS) + handle_pps_window(stats, now); + } +} + +static __always_inline __u32 gaming_determine_intensity(__u32 current, + __u32 pps) +{ + if (current >= 2) { + if (pps <= GAMING_TUNER_INTENSE_LOW) + return 1; + return 2; + } + + if (current == 1) { + if (pps >= GAMING_TUNER_INTENSE_HIGH) + return 2; + if (pps <= GAMING_TUNER_COMP_LOW) + return 0; + return 1; + } + + if (pps >= GAMING_TUNER_INTENSE_HIGH) + return 2; + if (pps >= GAMING_TUNER_COMP_HIGH) + return 1; + return 0; +} + +static __always_inline void handle_pps_window(struct gaming_stats *stats, __u64 now) +{ + __u32 smooth_pps; + __u32 variance; + __u64 recent_packets; + char current_comm[GAMING_TUNER_COMM_LEN] = {}; + + stats->pps_history_idx = (stats->pps_history_idx + 1) % GAMING_TUNER_PPS_HISTORY; + stats->pps_history[stats->pps_history_idx] = stats->tracked_udp_packets; + + smooth_pps = calculate_smooth_pps(stats); + variance = calculate_pps_variance(stats, smooth_pps); + + recent_packets = stats->tracked_udp_packets; + + stats->current_pps = smooth_pps; + stats->pps_variance = variance; + stats->tracked_udp_packets = 0; + stats->last_pps_update = now; + + if (bpf_get_current_comm(current_comm, sizeof(current_comm)) == 0) { + __builtin_memset(stats->current_comm, 0, sizeof(stats->current_comm)); + for (int i = 0; i < GAMING_TUNER_COMM_LEN; i++) { + stats->current_comm[i] = current_comm[i]; + if (!current_comm[i]) + break; + } + } + + if (!stats->is_gaming) { + if (smooth_pps >= GAMING_TUNER_UDP_MIN_PPS) { + __u32 variance_threshold = smooth_pps / 2; + __u32 variance_rel; + bool bursty = recent_packets >= (GAMING_TUNER_UDP_MIN_PPS * 2); + + if (variance_threshold < GAMING_TUNER_VARIANCE_MIN) + variance_threshold = GAMING_TUNER_VARIANCE_MIN; + + variance_rel = (__u32)(((__u64)smooth_pps * GAMING_TUNER_VARIANCE_REL_NUM) / + GAMING_TUNER_VARIANCE_REL_DEN); + if (variance_rel < GAMING_TUNER_VARIANCE_MIN) + variance_rel = GAMING_TUNER_VARIANCE_MIN; + if (variance_threshold < variance_rel) + variance_threshold = variance_rel; + + if (!bursty && smooth_pps < GAMING_TUNER_UDP_MIN_PPS + 3) { + stats->steady_periods = 0; + return; + } + + if (variance <= variance_threshold || bursty) + stats->steady_periods++; + else + stats->steady_periods = 0; + + if ((bursty && stats->steady_periods >= 2) || + (!bursty && stats->steady_periods >= 3)) { + __u32 start_intensity; + + if (stats->current_comm[0] == '\0') { + stats->steady_periods = 0; + return; + } + + stats->is_gaming = 1; + stats->steady_periods = 0; + stats->calm_periods = 0; + + start_intensity = gaming_determine_intensity(0, smooth_pps); + stats->game_intensity = start_intensity; + stats->reported_intensity = start_intensity; + stats->intensity_candidate = start_intensity; + stats->intensity_confidence = 1; + + notify_userspace(GAMING_SCENARIO_DETECTED, + start_intensity, + 
smooth_pps, + variance, + stats->active_ifindex, + stats->current_comm); + stats->last_intensity_change = now; +#ifndef BPFTUNE_LEGACY + gaming_timer_schedule(stats, GAMING_TUNER_TIMEOUT_NS); +#endif + } + } + return; + } + +#ifdef BPFTUNE_LEGACY + if (now - stats->last_activity > GAMING_TUNER_TIMEOUT_NS) { + notify_userspace(GAMING_SCENARIO_ENDED, + stats->game_intensity, + smooth_pps, + variance, + stats->active_ifindex, + stats->current_comm); + stats->is_gaming = 0; + stats->steady_periods = 0; + stats->calm_periods = 0; + stats->game_intensity = 0; + stats->current_comm[0] = '\0'; + return; + } +#else + gaming_timer_schedule(stats, GAMING_TUNER_TIMEOUT_NS); +#endif + + if (smooth_pps <= GAMING_TUNER_IDLE_PPS) + stats->calm_periods++; + else + stats->calm_periods = 0; + + if (stats->calm_periods >= 3) { + notify_userspace(GAMING_SCENARIO_ENDED, + stats->game_intensity, + smooth_pps, + variance, + stats->active_ifindex, + stats->current_comm); + stats->is_gaming = 0; + stats->steady_periods = 0; + stats->calm_periods = 0; + stats->game_intensity = 0; + stats->reported_intensity = 0; + stats->intensity_candidate = 0; + stats->intensity_confidence = 0; + stats->current_comm[0] = '\0'; + stats->last_intensity_change = 0; + return; + } + + { + __u32 candidate; + + candidate = gaming_determine_intensity(stats->reported_intensity, smooth_pps); + + if (candidate == 0 && stats->reported_intensity > 0 && + smooth_pps > GAMING_TUNER_IDLE_PPS) + candidate = stats->reported_intensity; + + if (stats->current_comm[0] == '\0') + return; + + if (candidate != stats->intensity_candidate) { + stats->intensity_candidate = candidate; + stats->intensity_confidence = 1; + } else if (stats->intensity_confidence < 5) { + stats->intensity_confidence++; + } + + if (candidate != stats->reported_intensity) { + __u32 required; + __u64 since_change; + + since_change = stats->last_intensity_change ? 
+ (now - stats->last_intensity_change) : + GAMING_INTENSITY_DWELL_NS; + + if (since_change < GAMING_INTENSITY_DWELL_NS) + goto out; + + if (candidate > stats->reported_intensity) + required = 3; + else if (candidate == 0) + required = 6; + else + required = 4; + + if (stats->intensity_confidence >= required) { + stats->reported_intensity = candidate; + stats->last_intensity_change = now; + notify_userspace(GAMING_SCENARIO_DETECTED, + candidate, + smooth_pps, + variance, + stats->active_ifindex, + stats->current_comm); + } + } + } + +out: + stats->game_intensity = stats->reported_intensity; +} + +BPF_FENTRY(udp_sendmsg, struct sock *sk, struct msghdr *msg, size_t len) +{ + __u32 key = 0; + struct gaming_stats *stats = bpf_map_lookup_elem(&stats_map, &key); + __u64 now = bpf_ktime_get_ns(); + __u16 dport = gaming_sock_dport(sk); + + if (!stats) + return 0; + + stats->udp_packets++; + gaming_count_packet(stats, now, len, dport); + +#ifndef BPFTUNE_LEGACY + if (sk) { + __u32 ifindex = BPF_CORE_READ(sk, __sk_common.skc_bound_dev_if); + + if (!ifindex) { + struct dst_entry *dst = BPF_CORE_READ(sk, sk_dst_cache); + if (dst) { + struct net_device *dev = BPF_CORE_READ(dst, dev); + if (dev) + ifindex = BPF_CORE_READ(dev, ifindex); + } + } + + if (ifindex) + stats->active_ifindex = ifindex; + } +#endif + + record_activity(stats, now); + + return 0; +} + +BPF_FENTRY(udp_recvmsg, struct sock *sk, struct msghdr *msg, size_t len, + int noblock, int flags, int *addr_len) +{ + __u32 key = 0; + struct gaming_stats *stats; + __u64 now; + __u16 dport = gaming_sock_dport(sk); + + stats = bpf_map_lookup_elem(&stats_map, &key); + if (!stats) + return 0; + + now = bpf_ktime_get_ns(); + + if (len > 0) + gaming_count_packet(stats, now, (__u64)len, dport); + + record_activity(stats, now); + + return 0; +} + +BPF_FENTRY(inet_create, struct net *net, struct socket *sock, int protocol, int kern) +{ + __u32 key = 0; + struct gaming_stats *stats = bpf_map_lookup_elem(&stats_map, &key); + + if (!stats) + return 0; + + if (!stats->last_pps_update) { + stats->last_pps_update = bpf_ktime_get_ns(); +#ifndef BPFTUNE_LEGACY + bpf_timer_init(&stats->timeout_timer, &stats_map, 0); + bpf_timer_set_callback(&stats->timeout_timer, gaming_timeout_cb); +#endif + } + + return 0; +} + +char __license[] SEC("license") = "GPL"; diff --git a/src/gaming_tuner.c b/src/gaming_tuner.c new file mode 100644 index 0000000..35c3c09 --- /dev/null +++ b/src/gaming_tuner.c @@ -0,0 +1,1264 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Gaming Performance Tuner - userspace controller + * Applies network tuning profiles when the eBPF detector reports activity. 
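+ *
+ * Flow: GAMING_SCENARIO_DETECTED events select one of three sysctl profiles
+ * (CASUAL/COMPETITIVE/INTENSE) plus per-NIC interrupt-coalescing tweaks;
+ * GAMING_SCENARIO_ENDED schedules a grace-period rollback of every cached
+ * baseline on a dedicated worker thread.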
+ */
+
+#define _GNU_SOURCE
+#define _POSIX_C_SOURCE 200809L
+
+#include <bpftune/bpftune.h>
+#include <bpftune/libbpftune.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <net/if.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+
+#include "gaming_tuner.h"
+#include "gaming_tuner.skel.h"
+#include "gaming_tuner.skel.legacy.h"
+#include "gaming_tuner.skel.nobtf.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+static struct bpftunable_desc tunable_descs[] = {
+	{ GAMING_TUNABLE_RMEM_DEFAULT, BPFTUNABLE_SYSCTL,
+	  "net.core.rmem_default", 0, 1 },
+	{ GAMING_TUNABLE_RMEM_MAX, BPFTUNABLE_SYSCTL,
+	  "net.core.rmem_max", 0, 1 },
+	{ GAMING_TUNABLE_WMEM_DEFAULT, BPFTUNABLE_SYSCTL,
+	  "net.core.wmem_default", 0, 1 },
+	{ GAMING_TUNABLE_WMEM_MAX, BPFTUNABLE_SYSCTL,
+	  "net.core.wmem_max", 0, 1 },
+	{ GAMING_TUNABLE_NETDEV_MAX_BACKLOG, BPFTUNABLE_SYSCTL,
+	  "net.core.netdev_max_backlog", 0, 1 },
+	{ GAMING_TUNABLE_NETDEV_BUDGET, BPFTUNABLE_SYSCTL,
+	  "net.core.netdev_budget", 0, 1 },
+	{ GAMING_TUNABLE_NETDEV_BUDGET_USECS, BPFTUNABLE_SYSCTL,
+	  "net.core.netdev_budget_usecs", 0, 1 },
+	{ GAMING_TUNABLE_UDP_MEM, BPFTUNABLE_SYSCTL,
+	  "net.ipv4.udp_mem", BPFTUNABLE_STRING, 1 },
+	{ GAMING_TUNABLE_BUSY_READ, BPFTUNABLE_SYSCTL,
+	  "net.core.busy_read", 0, 1 },
+	{ GAMING_TUNABLE_BUSY_POLL, BPFTUNABLE_SYSCTL,
+	  "net.core.busy_poll", 0, 1 },
+	{ GAMING_TUNABLE_UDP_EARLY_DEMUX, BPFTUNABLE_SYSCTL,
+	  "net.ipv4.udp_early_demux", 0, 1 },
+};
+
+static struct bpftunable_scenario scenarios[] = {
+	BPFTUNABLE_SCENARIO_FLAGS(GAMING_SCENARIO_DETECTED, "gaming_detected",
+				  "Gaming traffic pattern detected",
+				  BPFTUNABLE_SCENARIO_QUIET),
+	BPFTUNABLE_SCENARIO_FLAGS(GAMING_SCENARIO_ENDED, "gaming_ended",
+				  "Gaming session has ended",
+				  BPFTUNABLE_SCENARIO_QUIET),
+};
+
+struct profile_entry {
+	unsigned int index;
+	long value;
+	const char *str_value;
+};
+
+struct profile_definition {
+	const char *name;
+	const struct profile_entry *entries;
+	size_t entry_count;
+};
+
+static const struct profile_entry casual_entries[] = {
+	{ GAMING_TUNABLE_RMEM_DEFAULT, 262144, NULL },
+	{ GAMING_TUNABLE_RMEM_MAX, 8388608, NULL },
+	{ GAMING_TUNABLE_WMEM_DEFAULT, 262144, NULL },
+	{ GAMING_TUNABLE_WMEM_MAX, 8388608, NULL },
+	{ GAMING_TUNABLE_NETDEV_MAX_BACKLOG, 5000, NULL },
+	{ GAMING_TUNABLE_NETDEV_BUDGET, 400, NULL },
+	{ GAMING_TUNABLE_NETDEV_BUDGET_USECS, 8000, NULL },
+	{ GAMING_TUNABLE_UDP_MEM, 0, "65536 436900 8388608" },
+	{ GAMING_TUNABLE_BUSY_READ, 25, NULL },
+	{ GAMING_TUNABLE_BUSY_POLL, 25, NULL },
+	{ GAMING_TUNABLE_UDP_EARLY_DEMUX, 1, NULL },
+};
+
+static const struct profile_entry competitive_entries[] = {
+	{ GAMING_TUNABLE_RMEM_DEFAULT, 262144, NULL },
+	{ GAMING_TUNABLE_RMEM_MAX, 16777216, NULL },
+	{ GAMING_TUNABLE_WMEM_DEFAULT, 262144, NULL },
+	{ GAMING_TUNABLE_WMEM_MAX, 16777216, NULL },
+	{ GAMING_TUNABLE_NETDEV_MAX_BACKLOG, 5000, NULL },
+	{ GAMING_TUNABLE_NETDEV_BUDGET, 600, NULL },
+	{ GAMING_TUNABLE_NETDEV_BUDGET_USECS, 8000, NULL },
+	{ GAMING_TUNABLE_UDP_MEM, 0, "102400 873800 16777216" },
+	{ GAMING_TUNABLE_BUSY_READ, 50, NULL },
+	{ GAMING_TUNABLE_BUSY_POLL, 50, NULL },
+	{ GAMING_TUNABLE_UDP_EARLY_DEMUX, 1, NULL },
+};
+
+static const struct profile_entry intense_entries[] = {
+	{ GAMING_TUNABLE_RMEM_DEFAULT, 262144, NULL },
+	{ GAMING_TUNABLE_RMEM_MAX, 33554432, NULL },
+	{ GAMING_TUNABLE_WMEM_DEFAULT, 262144, NULL },
+	{ GAMING_TUNABLE_WMEM_MAX, 33554432, NULL },
+	{ GAMING_TUNABLE_NETDEV_MAX_BACKLOG, 5000, NULL },
+	{ GAMING_TUNABLE_NETDEV_BUDGET, 800,
NULL }, + { GAMING_TUNABLE_NETDEV_BUDGET_USECS, 8000, NULL }, + { GAMING_TUNABLE_UDP_MEM, 0, "204800 1747600 33554432" }, + { GAMING_TUNABLE_BUSY_READ, 75, NULL }, + { GAMING_TUNABLE_BUSY_POLL, 75, NULL }, + { GAMING_TUNABLE_UDP_EARLY_DEMUX, 1, NULL }, +}; + +static const struct profile_definition profiles[] = { + { "CASUAL", casual_entries, ARRAY_SIZE(casual_entries) }, + { "COMPETITIVE", competitive_entries, ARRAY_SIZE(competitive_entries) }, + { "INTENSE", intense_entries, ARRAY_SIZE(intense_entries) }, +}; + +#define SUMMARY_BUFFER_SIZE 512 + +struct summary_buffer { + char data[SUMMARY_BUFFER_SIZE]; + size_t len; + int truncated; +}; + +static void summary_buffer_init(struct summary_buffer *buffer) +{ + if (!buffer) + return; + + buffer->data[0] = '\0'; + buffer->len = 0; + buffer->truncated = 0; +} + +static void summary_buffer_append(struct summary_buffer *buffer, const char *name, + const char *value) +{ + size_t remaining; + int written; + + if (!buffer || !name) + return; + + if (!value) + value = ""; + + if (buffer->truncated) + return; + + remaining = sizeof(buffer->data) - buffer->len; + if (remaining == 0) { + buffer->truncated = 1; + return; + } + + written = snprintf(buffer->data + buffer->len, remaining, "%s%s=%s", + buffer->len ? ", " : "", name, value); + + if (written < 0) { + buffer->truncated = 1; + buffer->data[sizeof(buffer->data) - 1] = '\0'; + return; + } + + if ((size_t)written >= remaining) { + buffer->len = sizeof(buffer->data) - 1; + buffer->data[buffer->len] = '\0'; + buffer->truncated = 1; + return; + } + + buffer->len += (size_t)written; +} + +static int summary_buffer_is_empty(const struct summary_buffer *buffer) +{ + return !buffer || buffer->len == 0; +} + +static const char *summary_buffer_text(const struct summary_buffer *buffer) +{ + return (buffer && buffer->len) ? buffer->data : ""; +} + +static int summary_buffer_truncated(const struct summary_buffer *buffer) +{ + return buffer && buffer->truncated; +} + +static char gaming_tolower_char(char c) +{ + if (c >= 'A' && c <= 'Z') + return c + ('a' - 'A'); + return c; +} + +static int gaming_comm_matches(const char *comm, const char *pattern) +{ + size_t i; + + if (!comm || !pattern) + return 0; + + for (i = 0; i < GAMING_TUNER_COMM_LEN; i++) { + char p = pattern[i]; + + if (p == '*') + return 1; + + char c = comm[i]; + + if (p == '\0') + return c == '\0'; + if (c == '\0') + return 0; + if (gaming_tolower_char(c) != p) + return 0; + } + + return pattern[GAMING_TUNER_COMM_LEN - 1] == '*'; +} + +static void gaming_copy_comm(char *dst, size_t dst_len, const char *src) +{ + size_t copy_len; + + if (!dst || !dst_len) + return; + + memset(dst, 0, dst_len); + + if (!src) + return; + + copy_len = strnlen(src, dst_len - 1); + if (copy_len) + memcpy(dst, src, copy_len); +} + +static void gaming_lineage_append(char *buffer, size_t buffer_len, const char *entry) +{ + size_t used; + int written; + + if (!buffer || !buffer_len || !entry || !entry[0]) + return; + + used = strnlen(buffer, buffer_len); + if (used >= buffer_len - 1) + return; + + written = snprintf(buffer + used, buffer_len - used, "%s%s", + used ? 
" <- " : "", entry); + if (written < 0) + return; + + if ((size_t)written >= buffer_len - used) + buffer[buffer_len - 1] = '\0'; +} + +static int gaming_launcher_comm(const char *comm) +{ + if (!comm || !comm[0]) + return 0; + +#define GAMING_LAUNCHER_ENTRY(str) \ + if (gaming_comm_matches(comm, str)) \ + return 1; + + GAMING_TUNER_FOR_EACH_LAUNCHER(GAMING_LAUNCHER_ENTRY) + +#undef GAMING_LAUNCHER_ENTRY + + return 0; +} + +static int gaming_read_proc_status(pid_t pid, char *comm, size_t comm_len, pid_t *ppid) +{ + char path[64]; + FILE *f; + char line[256]; + int need_comm = comm && comm_len; + int need_ppid = ppid != NULL; + + if (comm && comm_len) + comm[0] = '\0'; + if (ppid) + *ppid = -1; + + if (pid <= 0) + return -EINVAL; + + snprintf(path, sizeof(path), "/proc/%d/status", pid); + f = fopen(path, "re"); + if (!f) + return -errno; + + while ((need_comm || need_ppid) && fgets(line, sizeof(line), f)) { + if (need_comm && strncmp(line, "Name:", 5) == 0) { + char *value = line + 5; + + while (*value && isspace((unsigned char)*value)) + value++; + + char *end = value + strlen(value); + while (end > value && isspace((unsigned char)end[-1])) + end--; + + size_t len = (size_t)(end - value); + if (len >= comm_len) + len = comm_len ? comm_len - 1 : 0; + + if (comm && comm_len) { + if (len && comm_len) + memcpy(comm, value, len); + if (comm_len) + comm[len] = '\0'; + } + + need_comm = 0; + } else if (need_ppid && strncmp(line, "PPid:", 5) == 0) { + char *value = line + 5; + + while (*value && isspace((unsigned char)*value)) + value++; + + long parsed = strtol(value, NULL, 10); + if (ppid) + *ppid = (pid_t)parsed; + need_ppid = 0; + } + } + + fclose(f); + + if ((comm && comm_len && comm[0] == '\0') && need_comm) + return -ENOENT; + if (ppid && *ppid < 0 && need_ppid) + return -ENOENT; + + return 0; +} + +static int gaming_cmdline_launcher(pid_t pid) +{ + char path[64]; + int fd; + ssize_t len; + static const size_t buf_sz = 4096; + char *buf; + int trusted = 0; + + if (pid <= 0) + return 0; + + snprintf(path, sizeof(path), "/proc/%d/cmdline", pid); + fd = open(path, O_RDONLY); + if (fd < 0) + return 0; + + buf = malloc(buf_sz); + if (!buf) { + close(fd); + return 0; + } + + len = read(fd, buf, buf_sz - 1); + close(fd); + + if (len <= 0) { + free(buf); + return 0; + } + + for (ssize_t i = 0; i < len; i++) { + if (buf[i] == '\0') + buf[i] = ' '; + } + buf[len] = '\0'; + +#define GAMING_LAUNCHER_CMD_ENTRY(str) \ + if (!trusted && strcasestr(buf, (str))) \ + trusted = 1; + + GAMING_TUNER_FOR_EACH_LAUNCHER_CMD(GAMING_LAUNCHER_CMD_ENTRY) + +#undef GAMING_LAUNCHER_CMD_ENTRY + + free(buf); + return trusted; +} + +static int gaming_process_trusted(pid_t pid, char *lineage, size_t lineage_len, + int *matched_cmdline) +{ + pid_t current = pid; + + if (lineage && lineage_len) + lineage[0] = '\0'; + if (matched_cmdline) + *matched_cmdline = 0; + + for (int depth = 0; depth < 6; depth++) { + char comm[GAMING_TUNER_COMM_LEN] = { 0 }; + pid_t parent = -1; + int ret; + + if (current <= 0) + break; + + ret = gaming_read_proc_status(current, comm, sizeof(comm), &parent); + if (ret < 0) { + if (depth == 0) + return 0; + break; + } + + gaming_lineage_append(lineage, lineage_len, comm); + + if (gaming_launcher_comm(comm) || gaming_cmdline_launcher(current)) + { + if (matched_cmdline && !gaming_launcher_comm(comm)) + *matched_cmdline = 1; + return 1; + } + + if (parent <= 0 || parent == current) + break; + + current = parent; + } + + return 0; +} + +static size_t clamp_profile_index(int intensity) +{ + if (intensity < 
0) + return 0; + + size_t idx = (size_t)intensity; + + if (idx >= ARRAY_SIZE(profiles)) + return ARRAY_SIZE(profiles) - 1; + + return idx; +} + +struct gaming_state { + int active; + int intensity; + int current_pps; + time_t start_time; + unsigned int optimization_count; + unsigned int revert_count; + unsigned int active_ifindex; + char active_ifname[IF_NAMESIZE]; + char active_comm[GAMING_TUNER_COMM_LEN]; + int pending_revert; + time_t revert_deadline; +}; + +static struct gaming_state g_state; +static pthread_mutex_t g_state_lock = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t g_state_cond = PTHREAD_COND_INITIALIZER; +static pthread_t g_revert_thread; +static int g_revert_thread_started; +static int g_revert_thread_stop; +static struct bpftuner *g_tuner; + +#define GAMING_REVERT_GRACE_SECONDS 10 +#define GAMING_MAX_INTERFACES 8 + +struct interface_tuning_state { + unsigned int ifindex; + char ifname[IF_NAMESIZE]; + struct ethtool_coalesce baseline; + __u32 current_rx_usecs; + __u32 current_tx_usecs; + int baseline_valid; + int applied; +}; + +static struct interface_tuning_state g_interfaces[GAMING_MAX_INTERFACES]; + +static int gaming_ethtool_get(const char *ifname, struct ethtool_coalesce *coal); +static int gaming_ethtool_set(const char *ifname, const struct ethtool_coalesce *coal); +static struct interface_tuning_state *gaming_interface_state(unsigned int ifindex); +static int gaming_interface_prepare(struct interface_tuning_state *state); +static int gaming_interface_restore_locked(struct interface_tuning_state *state); +static void apply_interface_tuning(unsigned int ifindex, int intensity); +static int restore_all_interfaces(void); +static void gaming_schedule_revert(void); +static void gaming_start_revert_worker(void); +static void gaming_stop_revert_worker(void); +static void *gaming_revert_worker(void *arg); +static void revert_optimizations(struct bpftuner *tuner, int force); + +static int gaming_ethtool_get(const char *ifname, struct ethtool_coalesce *coal) +{ + struct ifreq ifr; + struct ethtool_coalesce request = { .cmd = ETHTOOL_GCOALESCE }; + int fd; + + if (!ifname || !coal) + return -EINVAL; + + fd = socket(AF_INET, SOCK_DGRAM, 0); + if (fd < 0) + return -errno; + + memset(&ifr, 0, sizeof(ifr)); + snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", ifname); + ifr.ifr_data = (void *)&request; + + if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) { + int err = -errno; + close(fd); + return err; + } + + close(fd); + *coal = request; + return 0; +} + +static int gaming_ethtool_set(const char *ifname, const struct ethtool_coalesce *coal) +{ + struct ifreq ifr; + struct ethtool_coalesce request; + int fd; + + if (!ifname || !coal) + return -EINVAL; + + fd = socket(AF_INET, SOCK_DGRAM, 0); + if (fd < 0) + return -errno; + + memset(&ifr, 0, sizeof(ifr)); + request = *coal; + request.cmd = ETHTOOL_SCOALESCE; + + snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", ifname); + ifr.ifr_data = (void *)&request; + + if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) { + int err = -errno; + close(fd); + return err; + } + + close(fd); + return 0; +} + +static struct interface_tuning_state *gaming_interface_state(unsigned int ifindex) +{ + struct interface_tuning_state *free_state = NULL; + + if (!ifindex) + return NULL; + + for (size_t i = 0; i < GAMING_MAX_INTERFACES; i++) { + struct interface_tuning_state *state = &g_interfaces[i]; + + if (state->ifindex == ifindex) + return state; + if (!state->ifindex && !free_state) + free_state = state; + } + + if (!free_state) + return NULL; + + memset(free_state, 0, 
sizeof(*free_state)); + free_state->ifindex = ifindex; + return free_state; +} + +static int gaming_interface_prepare(struct interface_tuning_state *state) +{ + int ret; + + if (!state) + return -EINVAL; + + if (!if_indextoname(state->ifindex, state->ifname)) { + ret = -errno; + bpftune_log(LOG_DEBUG, + "gaming: unable to resolve ifindex %u: %s", + state->ifindex, strerror(-ret)); + return ret; + } + + if (state->baseline_valid) + return 0; + + ret = gaming_ethtool_get(state->ifname, &state->baseline); + if (ret) { + bpftune_log(LOG_DEBUG, + "gaming: failed to read ethtool coalesce for %s: %s", + state->ifname, strerror(-ret)); + return ret; + } + + state->current_rx_usecs = state->baseline.rx_coalesce_usecs; + state->current_tx_usecs = state->baseline.tx_coalesce_usecs; + state->baseline_valid = 1; + state->applied = 0; + + return 0; +} + +static int gaming_interface_restore_locked(struct interface_tuning_state *state) +{ + int ret; + + if (!state || !state->ifindex || !state->baseline_valid) + return 0; + + if (!state->applied && + state->current_rx_usecs == state->baseline.rx_coalesce_usecs && + state->current_tx_usecs == state->baseline.tx_coalesce_usecs) + return 0; + + ret = gaming_ethtool_set(state->ifname, &state->baseline); + if (ret) { + bpftune_log(LOG_DEBUG, + "gaming: failed to restore coalesce on %s: %s", + state->ifname, strerror(-ret)); + return ret; + } + + state->current_rx_usecs = state->baseline.rx_coalesce_usecs; + state->current_tx_usecs = state->baseline.tx_coalesce_usecs; + state->applied = 0; + + bpftune_log(LOG_INFO, + "Restored interrupt coalescing on %s to baseline %u usecs", + state->ifname, state->baseline.rx_coalesce_usecs); + + return 0; +} + +static void apply_interface_tuning(unsigned int ifindex, int intensity) +{ + struct interface_tuning_state *state; + unsigned int target_usecs; + int ret; + + if (!ifindex) + return; + + pthread_mutex_lock(&g_state_lock); + + state = gaming_interface_state(ifindex); + if (!state) { + pthread_mutex_unlock(&g_state_lock); + bpftune_log(LOG_DEBUG, + "gaming: no available slot for interface index %u", ifindex); + return; + } + + ret = gaming_interface_prepare(state); + if (ret) { + pthread_mutex_unlock(&g_state_lock); + return; + } + + if (intensity <= 0) { + gaming_interface_restore_locked(state); + pthread_mutex_unlock(&g_state_lock); + return; + } + + target_usecs = (intensity >= 2) ? 
0 : 10; + + if (state->current_rx_usecs == target_usecs && + state->current_tx_usecs == target_usecs) { + pthread_mutex_unlock(&g_state_lock); + return; + } + + struct ethtool_coalesce desired = state->baseline; + + desired.rx_coalesce_usecs = target_usecs; + desired.tx_coalesce_usecs = target_usecs; + desired.use_adaptive_rx_coalesce = 0; + desired.use_adaptive_tx_coalesce = 0; + + ret = gaming_ethtool_set(state->ifname, &desired); + if (ret) { + bpftune_log(LOG_DEBUG, + "gaming: failed to set coalesce on %s: %s", + state->ifname, strerror(-ret)); + pthread_mutex_unlock(&g_state_lock); + return; + } + + state->current_rx_usecs = target_usecs; + state->current_tx_usecs = target_usecs; + state->applied = (target_usecs != state->baseline.rx_coalesce_usecs || + target_usecs != state->baseline.tx_coalesce_usecs); + + bpftune_log(LOG_INFO, + "Adjusted %s interrupt coalescing to %u usecs for gaming intensity %d", + state->ifname, target_usecs, intensity); + + pthread_mutex_unlock(&g_state_lock); +} + +static int restore_all_interfaces(void) +{ + int failures = 0; + + pthread_mutex_lock(&g_state_lock); + for (size_t i = 0; i < GAMING_MAX_INTERFACES; i++) { + struct interface_tuning_state *state = &g_interfaces[i]; + + if (!state->ifindex || !state->baseline_valid) + continue; + + if (gaming_interface_restore_locked(state) < 0) + failures++; + } + pthread_mutex_unlock(&g_state_lock); + + return failures; +} + +static void gaming_schedule_revert(void) +{ + time_t now = time(NULL); + + pthread_mutex_lock(&g_state_lock); + if (!g_state.active) { + g_state.pending_revert = 0; + g_state.revert_deadline = 0; + pthread_mutex_unlock(&g_state_lock); + return; + } + + g_state.pending_revert = 1; + g_state.revert_deadline = now + GAMING_REVERT_GRACE_SECONDS; + pthread_cond_broadcast(&g_state_cond); + pthread_mutex_unlock(&g_state_lock); + + bpftune_log(LOG_DEBUG, + "Gaming traffic paused; baseline restore scheduled in %d seconds", + GAMING_REVERT_GRACE_SECONDS); +} + +static void gaming_start_revert_worker(void) +{ + if (g_revert_thread_started) + return; + + g_revert_thread_stop = 0; + int ret = pthread_create(&g_revert_thread, NULL, gaming_revert_worker, NULL); + if (ret == 0) { + g_revert_thread_started = 1; + } else { + bpftune_log(LOG_WARNING, "gaming: failed to start revert worker: %s", + strerror(ret)); + } +} + +static void gaming_stop_revert_worker(void) +{ + if (!g_revert_thread_started) + return; + + pthread_mutex_lock(&g_state_lock); + g_revert_thread_stop = 1; + pthread_cond_broadcast(&g_state_cond); + pthread_mutex_unlock(&g_state_lock); + + pthread_join(g_revert_thread, NULL); + g_revert_thread_started = 0; + g_revert_thread_stop = 0; +} + +static void *gaming_revert_worker(void *arg) +{ + (void)arg; + + while (1) { + pthread_mutex_lock(&g_state_lock); + while (!g_state.pending_revert && !g_revert_thread_stop) + pthread_cond_wait(&g_state_cond, &g_state_lock); + + if (g_revert_thread_stop) { + pthread_mutex_unlock(&g_state_lock); + break; + } + + while (g_state.pending_revert && !g_revert_thread_stop) { + time_t now = time(NULL); + + if (now >= g_state.revert_deadline) { + struct bpftuner *tuner = g_tuner; + + g_state.pending_revert = 0; + pthread_mutex_unlock(&g_state_lock); + if (tuner) + revert_optimizations(tuner, 0); + pthread_mutex_lock(&g_state_lock); + break; + } + + time_t wait_seconds = g_state.revert_deadline - now; + struct timespec ts; + + clock_gettime(CLOCK_REALTIME, &ts); + ts.tv_sec += wait_seconds; + + if (pthread_cond_timedwait(&g_state_cond, &g_state_lock, &ts) == 
ETIMEDOUT) + continue; + } + + pthread_mutex_unlock(&g_state_lock); + } + + return NULL; +} + +static void apply_profile(struct bpftuner *tuner, int profile_idx, int conservative) +{ + size_t requested = clamp_profile_index(profile_idx); + size_t effective = conservative && requested > 0 ? requested - 1 : requested; + const struct profile_definition *profile = &profiles[effective]; + const char *profile_name = profile->name; + struct summary_buffer summary; + int applied = 0; + + summary_buffer_init(&summary); + + if (profile_idx != (int)requested) { + bpftune_log(LOG_DEBUG, "Clamped gaming profile index %d to %zu", profile_idx, + requested); + } + + bpftune_log(LOG_DEBUG, "Applying %s gaming profile%s", profile_name, + conservative ? " (conservative mode)" : ""); + + for (size_t i = 0; i < profile->entry_count; i++) { + const struct profile_entry *entry = &profile->entries[i]; + const char *str_val = entry->str_value; + long val = entry->value; + unsigned int index = entry->index; + struct bpftunable *tunable = bpftuner_tunable(tuner, index); + int ret = 0; + + if (!tunable) + continue; + + if ((tunable->desc.flags & BPFTUNABLE_STRING) && str_val) { + ret = bpftuner_tunable_sysctl_write(tuner, index, GAMING_SCENARIO_DETECTED, + bpftune_global_netns_cookie(), 1, + (void *)str_val, NULL); + } else { + ret = bpftuner_tunable_sysctl_write(tuner, index, GAMING_SCENARIO_DETECTED, + bpftune_global_netns_cookie(), 1, + &val, NULL); + } + + if (ret < 0) { + bpftune_log(LOG_WARNING, + "Failed to set '%s' for %s gaming profile: %s", + tunable->desc.name, profile_name, strerror(-ret)); + continue; + } + + char value_buf[128]; + const char *value_str = str_val; + + if (!value_str) { + snprintf(value_buf, sizeof(value_buf), "%ld", val); + value_str = value_buf; + } + + summary_buffer_append(&summary, tunable->desc.name, value_str); + + applied = 1; + } + + pthread_mutex_lock(&g_state_lock); + if (applied) { + g_state.active = 1; + g_state.start_time = time(NULL); + g_state.optimization_count++; + } + g_state.pending_revert = 0; + g_state.revert_deadline = 0; + pthread_cond_broadcast(&g_state_cond); + pthread_mutex_unlock(&g_state_lock); + + if (applied) { + const char *details = summary_buffer_is_empty(&summary) ? + "(no tunables changed)" : + summary_buffer_text(&summary); + bpftune_log(LOG_NOTICE, "Applied %s profile: %s%s", + profile_name, + details, + summary_buffer_truncated(&summary) ? " ..." 
: ""); + + pthread_mutex_lock(&g_state_lock); + g_state.intensity = (int)requested; + pthread_mutex_unlock(&g_state_lock); + } +} + +static void revert_optimizations(struct bpftuner *tuner, int force) +{ + int active; + int dirty; + struct summary_buffer summary; + int restored = 0; + + summary_buffer_init(&summary); + + pthread_mutex_lock(&g_state_lock); + active = g_state.active; + dirty = g_state.optimization_count > g_state.revert_count; + + if (!force && !active) { + g_state.pending_revert = 0; + g_state.revert_deadline = 0; + pthread_mutex_unlock(&g_state_lock); + return; + } + + if (force && !active && !dirty) { + g_state.pending_revert = 0; + g_state.revert_deadline = 0; + pthread_mutex_unlock(&g_state_lock); + return; + } + + g_state.pending_revert = 0; + g_state.revert_deadline = 0; + pthread_mutex_unlock(&g_state_lock); + + bpftune_log(LOG_NOTICE, "Reverting gaming optimizations"); + + int tunable_failures = 0; + int interface_failures = 0; + struct bpftunable *t; + + bpftuner_for_each_tunable(tuner, t) { + int ret; + + if (t->desc.flags & BPFTUNABLE_STRING) { + ret = bpftuner_tunable_sysctl_write(tuner, t->desc.id, GAMING_SCENARIO_ENDED, + bpftune_global_netns_cookie(), 1, + (void *)t->initial_str, NULL); + } else { + long val = t->initial_values[0]; + + ret = bpftuner_tunable_sysctl_write(tuner, t->desc.id, GAMING_SCENARIO_ENDED, + bpftune_global_netns_cookie(), 1, + &val, NULL); + } + + if (ret < 0) { + bpftune_log(LOG_WARNING, "Failed to restore '%s' to baseline: %s", + t->desc.name, strerror(-ret)); + tunable_failures++; + continue; + } + + char value_buf[128]; + const char *value_str; + + if (t->desc.flags & BPFTUNABLE_STRING) { + value_str = t->initial_str[0] ? t->initial_str : ""; + } else { + size_t buf_len = 0; + + value_buf[0] = '\0'; + for (__u8 i = 0; i < t->desc.num_values; i++) { + int written = snprintf(value_buf + buf_len, + sizeof(value_buf) - buf_len, + "%s%ld", + buf_len ? " " : "", + t->initial_values[i]); + if (written < 0) + written = 0; + if ((size_t)written >= sizeof(value_buf) - buf_len) { + buf_len = sizeof(value_buf) - 1; + value_buf[buf_len] = '\0'; + break; + } + buf_len += (size_t)written; + } + + value_str = value_buf; + } + + summary_buffer_append(&summary, t->desc.name, value_str); + restored++; + } + + interface_failures = restore_all_interfaces(); + + pthread_mutex_lock(&g_state_lock); + if (tunable_failures == 0 && interface_failures == 0) { + g_state.active = 0; + g_state.revert_count++; + g_state.intensity = 0; + g_state.active_ifindex = 0; + g_state.active_ifname[0] = '\0'; + g_state.active_comm[0] = '\0'; + pthread_cond_broadcast(&g_state_cond); + pthread_mutex_unlock(&g_state_lock); + if (restored) { + const char *details = summary_buffer_is_empty(&summary) ? + "baseline already active" : + summary_buffer_text(&summary); + bpftune_log(LOG_NOTICE, "Reverted gaming tunables: %s%s", + details, + summary_buffer_truncated(&summary) ? " ..." 
: ""); + } else { + bpftune_log(LOG_DEBUG, "Gaming profile settings already at baseline"); + } + return; + } + + g_state.pending_revert = 1; + g_state.revert_deadline = time(NULL) + GAMING_REVERT_GRACE_SECONDS; + pthread_cond_broadcast(&g_state_cond); + pthread_mutex_unlock(&g_state_lock); + + if (interface_failures) { + bpftune_log(LOG_WARNING, + "Failed to restore interrupt coalescing on %d interface(s); will retry", + interface_failures); + } + + if (tunable_failures) { + bpftune_log(LOG_WARNING, + "Failed to restore %d gaming tunable(s); will retry in %d seconds", + tunable_failures, + GAMING_REVERT_GRACE_SECONDS); + } else { + bpftune_log(LOG_WARNING, + "Gaming tunables restored but interface rollback still pending; retrying in %d seconds", + GAMING_REVERT_GRACE_SECONDS); + } +} + + +int init(struct bpftuner *tuner) +{ + int ret = bpftuner_tunables_init(tuner, ARRAY_SIZE(tunable_descs), tunable_descs, + ARRAY_SIZE(scenarios), scenarios); + if (ret != 0) { + bpftune_log(LOG_ERR, "Failed to initialize gaming tuner descriptors: %d", ret); + return ret; + } + + if (bpftune_bpf_support() == BPFTUNE_SUPPORT_NONE) { + bpftune_log(LOG_ERR, "Gaming tuner requires BPF support"); + return -1; + } + + pthread_mutex_lock(&g_state_lock); + g_state = (struct gaming_state){0}; + pthread_mutex_unlock(&g_state_lock); + memset(g_interfaces, 0, sizeof(g_interfaces)); + + g_tuner = tuner; + gaming_start_revert_worker(); + + bpftune_log(LOG_NOTICE, + "Gaming tuner ready: tracking UDP payloads ≤%u bytes with ≥%u pkt/s sustained for detection", + GAMING_TUNER_UDP_MAX_SIZE, GAMING_TUNER_UDP_MIN_PPS); + + return bpftuner_bpf_init(gaming, tuner, NULL); +} + +void fini(struct bpftuner *tuner) +{ + gaming_stop_revert_worker(); + + pthread_mutex_lock(&g_state_lock); + int active = g_state.active; + int dirty = g_state.optimization_count > g_state.revert_count; + pthread_mutex_unlock(&g_state_lock); + + if (active || dirty) + revert_optimizations(tuner, 1); + + bpftuner_bpf_fini(tuner); + g_tuner = NULL; +} + +void event_handler(struct bpftuner *tuner, struct bpftune_event *event, + __attribute__((unused)) void *ctx) +{ + if (!event) + return; + + switch (event->scenario_id) { + case GAMING_SCENARIO_DETECTED: { + struct gaming_event_data data = {}; + char ifname[IF_NAMESIZE] = { 0 }; + char comm[GAMING_TUNER_COMM_LEN] = { 0 }; + int intensity; + int pps; + long variance; + unsigned int ifindex; + size_t profile_idx; + const char *profile_name; + + memcpy(&data, event->raw_data, sizeof(data)); + + intensity = (int)data.intensity; + pps = (int)data.pps; + variance = (long)data.variance; + ifindex = data.ifindex; + profile_idx = clamp_profile_index(intensity); + profile_name = profiles[profile_idx].name; + + gaming_copy_comm(comm, sizeof(comm), data.comm); + + if (ifindex) + if_indextoname(ifindex, ifname); + + if (!gaming_launcher_comm(comm)) { + pid_t pid = (pid_t)event->pid; + char lineage[256] = { 0 }; + int matched_cmdline = 0; + + if (!gaming_process_trusted(pid, lineage, sizeof(lineage), + &matched_cmdline)) { + bpftune_log(LOG_DEBUG, + "Ignoring gaming detection from untrusted lineage (pid: %d, process: %s%s%s)", + event->pid, + comm[0] ? comm : "", + lineage[0] ? ", lineage: " : "", + lineage[0] ? 
lineage : ""); + break; + } + } + + pthread_mutex_lock(&g_state_lock); + g_state.intensity = intensity; + g_state.current_pps = pps; + g_state.pending_revert = 0; + g_state.revert_deadline = 0; + g_state.active_ifindex = ifindex; + if (ifindex && ifname[0]) + snprintf(g_state.active_ifname, sizeof(g_state.active_ifname), "%s", ifname); + else + g_state.active_ifname[0] = '\0'; + g_state.active_ifname[sizeof(g_state.active_ifname) - 1] = '\0'; + gaming_copy_comm(g_state.active_comm, sizeof(g_state.active_comm), comm); + pthread_cond_broadcast(&g_state_cond); + pthread_mutex_unlock(&g_state_lock); + + if (comm[0]) { + bpftune_log(LOG_NOTICE, + "Detected %s gaming profile (process: %s, pps: %d, variance: %ld%s%s)", + profile_name, comm, pps, variance, + ifname[0] ? ", interface: " : "", + ifname[0] ? ifname : ""); + } else { + bpftune_log(LOG_NOTICE, + "Detected %s gaming profile (pps: %d, variance: %ld%s%s)", + profile_name, pps, variance, + ifname[0] ? ", interface: " : "", + ifname[0] ? ifname : ""); + } + + apply_profile(tuner, intensity, 0); + apply_interface_tuning(ifindex, intensity); + break; + } + + case GAMING_SCENARIO_ENDED: { + struct gaming_event_data data = {}; + char ifname[IF_NAMESIZE] = { 0 }; + char comm[GAMING_TUNER_COMM_LEN] = { 0 }; + char tracked_comm[GAMING_TUNER_COMM_LEN] = { 0 }; + const char *log_comm = NULL; + int was_active; + int pps; + long variance; + unsigned int ifindex; + + memcpy(&data, event->raw_data, sizeof(data)); + + pps = (int)data.pps; + variance = (long)data.variance; + ifindex = data.ifindex; + + gaming_copy_comm(comm, sizeof(comm), data.comm); + + if (ifindex) + if_indextoname(ifindex, ifname); + + pthread_mutex_lock(&g_state_lock); + was_active = g_state.active; + if (was_active) { + g_state.current_pps = pps; + if (ifindex) { + g_state.active_ifindex = ifindex; + if (ifname[0]) + snprintf(g_state.active_ifname, sizeof(g_state.active_ifname), "%s", ifname); + } + g_state.active_ifname[sizeof(g_state.active_ifname) - 1] = '\0'; + gaming_copy_comm(tracked_comm, sizeof(tracked_comm), g_state.active_comm); + } + pthread_mutex_unlock(&g_state_lock); + + if (!was_active) + break; + + if (tracked_comm[0]) + log_comm = tracked_comm; + else if (comm[0]) + log_comm = comm; + + if (log_comm) { + bpftune_log(LOG_DEBUG, + "Gaming traffic quiet (process: %s, pps: %d, variance: %ld%s%s); scheduling baseline restore", + log_comm, pps, variance, + ifname[0] ? ", interface: " : "", + ifname[0] ? ifname : ""); + } else { + bpftune_log(LOG_DEBUG, + "Gaming traffic quiet (pps: %d, variance: %ld%s%s); scheduling baseline restore", + pps, variance, + ifname[0] ? ", interface: " : "", + ifname[0] ? ifname : ""); + } + + gaming_schedule_revert(); + break; + } + + default: + bpftune_log(LOG_DEBUG, "Unknown event scenario %u for tuner %s", + event->scenario_id, tuner->name); + break; + } +} diff --git a/src/gaming_tuner.h b/src/gaming_tuner.h new file mode 100644 index 0000000..ad2c9cd --- /dev/null +++ b/src/gaming_tuner.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Gaming Performance Tuner definitions shared between user and BPF code. 
+ */ + +#ifndef GAMING_TUNER_H +#define GAMING_TUNER_H + +#include + +#ifndef GAMING_TUNER_COMM_LEN +#ifdef TASK_COMM_LEN +#define GAMING_TUNER_COMM_LEN TASK_COMM_LEN +#else +#define GAMING_TUNER_COMM_LEN 16 +#endif +#endif + +#define GAMING_TUNER_FOR_EACH_LAUNCHER(ENTRY) \ + ENTRY("steam") \ + ENTRY("steamwebhelper") \ + ENTRY("steam-runtime*") \ + ENTRY("pressure-vessel") \ + ENTRY("gamescope") \ + ENTRY("gamemoderun") \ + ENTRY("gamemoded") \ + ENTRY("heroic") \ + ENTRY("lutris") \ + ENTRY("legendary") \ + ENTRY("bottles") \ + ENTRY("mangohud") \ + ENTRY("proton") \ + ENTRY("steamworks*") \ + ENTRY("wine*") + +/* Number of samples stored when smoothing packets-per-second estimates. */ +#define GAMING_TUNER_PPS_HISTORY 8 + +/* UDP payload threshold when counting UDP packets for gaming detection. */ +#define GAMING_TUNER_UDP_MAX_SIZE 1500 + +/* Minimum packets-per-second before we treat traffic as gaming related. */ +#define GAMING_TUNER_UDP_MIN_PPS 25 + +/* Smoothed PPS thresholds used to select competitive vs intense profiles. */ +#define GAMING_TUNER_COMPETITIVE_PPS 70 +#define GAMING_TUNER_INTENSE_PPS 140 + +/* Threshold below which we consider traffic idle and start rollback countdown. */ +#define GAMING_TUNER_IDLE_PPS 10 + +#define GAMING_TUNER_INTENSITY_MARGIN 15 +#define GAMING_TUNER_COMP_HIGH (GAMING_TUNER_COMPETITIVE_PPS + GAMING_TUNER_INTENSITY_MARGIN) +#define GAMING_TUNER_COMP_LOW \ + ((GAMING_TUNER_COMPETITIVE_PPS > GAMING_TUNER_INTENSITY_MARGIN) ? \ + (GAMING_TUNER_COMPETITIVE_PPS - GAMING_TUNER_INTENSITY_MARGIN) : \ + GAMING_TUNER_UDP_MIN_PPS) +#define GAMING_TUNER_INTENSE_HIGH (GAMING_TUNER_INTENSE_PPS + GAMING_TUNER_INTENSITY_MARGIN) +#define GAMING_TUNER_INTENSE_LOW \ + ((GAMING_TUNER_INTENSE_PPS > GAMING_TUNER_INTENSITY_MARGIN) ? \ + (GAMING_TUNER_INTENSE_PPS - GAMING_TUNER_INTENSITY_MARGIN) : \ + GAMING_TUNER_COMP_HIGH) + +#define GAMING_TUNER_VARIANCE_MIN 15 +#define GAMING_TUNER_VARIANCE_REL_NUM 3 +#define GAMING_TUNER_VARIANCE_REL_DEN 2 + +/* Nanoseconds without activity before declaring the session finished. 
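+ * (Five seconds; this detection-side timeout is separate from the ~10 s
+ * revert grace period the userspace controller applies before rollback.)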
+ */
+#define GAMING_TUNER_TIMEOUT_NS (5ULL * 1000000000ULL)
+
+#define GAMING_TUNER_FOR_EACH_LAUNCHER_CMD(ENTRY)	\
+	ENTRY("steamapps/")			\
+	ENTRY("SteamLaunch")			\
+	ENTRY("PressureVessel")			\
+	ENTRY("pressure-vessel")		\
+	ENTRY("gamescope")			\
+	ENTRY("gamemoderun")			\
+	ENTRY("gamemode")			\
+	ENTRY("heroic")				\
+	ENTRY("lutris")				\
+	ENTRY("legendary")			\
+	ENTRY("bottles")			\
+	ENTRY("mangohud")			\
+	ENTRY("proton")				\
+	ENTRY("steamapps/compatdata/")		\
+	ENTRY("steamapps/common/")		\
+	ENTRY("steam-runtime")			\
+	ENTRY("wine")				\
+	ENTRY(".exe")
+
+enum gaming_tunables {
+	GAMING_TUNABLE_RMEM_DEFAULT,
+	GAMING_TUNABLE_RMEM_MAX,
+	GAMING_TUNABLE_WMEM_DEFAULT,
+	GAMING_TUNABLE_WMEM_MAX,
+	GAMING_TUNABLE_NETDEV_MAX_BACKLOG,
+	GAMING_TUNABLE_NETDEV_BUDGET,
+	GAMING_TUNABLE_NETDEV_BUDGET_USECS,
+	GAMING_TUNABLE_UDP_MEM,
+	GAMING_TUNABLE_BUSY_READ,
+	GAMING_TUNABLE_BUSY_POLL,
+	GAMING_TUNABLE_UDP_EARLY_DEMUX,
+	GAMING_TUNABLE_COUNT,
+};
+
+enum gaming_scenarios {
+	GAMING_SCENARIO_DETECTED,
+	GAMING_SCENARIO_ENDED,
+};
+
+enum gaming_tuner_event_index {
+	GAMING_TUNER_EVENT_INTENSITY,
+	GAMING_TUNER_EVENT_PPS,
+	GAMING_TUNER_EVENT_VARIANCE,
+	GAMING_TUNER_EVENT_IFINDEX,
+	GAMING_TUNER_EVENT_COUNT,
+};
+
+struct gaming_event_data {
+	__u32 intensity;
+	__u32 pps;
+	__u32 variance;
+	__u32 ifindex;
+	char comm[GAMING_TUNER_COMM_LEN];
+};
+
+#endif /* GAMING_TUNER_H */
diff --git a/src/ip_frag_tuner.c b/src/ip_frag_tuner.c
index 842a347..9338609 100644
--- a/src/ip_frag_tuner.c
+++ b/src/ip_frag_tuner.c
@@ -22,10 +22,12 @@ static struct bpftunable_desc descs[] = {
 };
 
 static struct bpftunable_scenario scenarios[] = {
-{ IP_FRAG_THRESHOLD_INCREASE, "need to increase IP fragmentation high threshold",
-  "this allows additional memory to be used to accommodate more defragmentation." },
-{ IP_FRAG_THRESHOLD_DECREASE, "need to decrease IP fragmentation high threshold",
-  "as we increased fragmentation high threshold we saw a correlation in reassembly failures; this indicates that we received more invalid fragments as we added memory to process them. As such, further increases are likely to be ineffective so reduce high threshold." },
+	BPFTUNABLE_SCENARIO(IP_FRAG_THRESHOLD_INCREASE,
+			    "need to increase IP fragmentation high threshold",
+			    "this allows additional memory to be used to accommodate more defragmentation."),
+	BPFTUNABLE_SCENARIO(IP_FRAG_THRESHOLD_DECREASE,
+			    "need to decrease IP fragmentation high threshold",
+			    "as we increased fragmentation high threshold we saw a correlation in reassembly failures; this indicates that we received more invalid fragments as we added memory to process them. As such, further increases are likely to be ineffective so reduce high threshold."),
 };
 
 int init(struct bpftuner *tuner)
diff --git a/src/libbpftune.c b/src/libbpftune.c
index 6cddf1f..f200027 100644
--- a/src/libbpftune.c
+++ b/src/libbpftune.c
@@ -1440,8 +1440,16 @@ static void __bpftuner_scenario_log(struct bpftuner *tuner, unsigned int tunable
 				    const char *fmt, va_list *args)
 {
 	struct bpftunable *t = bpftuner_tunable(tuner, tunable);
+	struct bpftunable_scenario *sc;
 	bool global_ns = netns_fd == 0;
 
+	if (!t)
+		return;
+	if (scenario >= tuner->num_scenarios)
+		return;
+
+	sc = &tuner->scenarios[scenario];
+
 	if (summary) {
 		unsigned long count;
 
@@ -1450,10 +1458,10 @@ static void __bpftuner_scenario_log(struct bpftuner *tuner, unsigned int tunable
 		if (!count)
 			return;
 		bpftune_log(BPFTUNE_LOG_LEVEL, "# Summary: scenario '%s' occurred %ld times for tunable '%s' in %sglobal ns. 
%s\n", - tuner->scenarios[scenario].name, count, + sc->name, count, t->desc.name, global_ns ? "" : "non-", - tuner->scenarios[scenario].description); + sc->description ? sc->description : ""); if (t->desc.type == BPFTUNABLE_SYSCTL && global_ns) { char oldvals[PATH_MAX] = { }; char newvals[PATH_MAX] = { }; @@ -1480,14 +1488,19 @@ static void __bpftuner_scenario_log(struct bpftuner *tuner, unsigned int tunable t->desc.name, newvals); } } else { - bpftune_log(BPFTUNE_LOG_LEVEL, "Scenario '%s' occurred for tunable '%s' in %sglobal ns. %s\n", - tuner->scenarios[scenario].name, - t->desc.name, - global_ns ? "" : "non-", - tuner->scenarios[scenario].description); - if (args) + if (!(sc->flags & BPFTUNABLE_SCENARIO_QUIET)) { + bpftune_log(BPFTUNE_LOG_LEVEL, + "Scenario '%s' occurred for tunable '%s' in %sglobal ns. %s\n", + sc->name, + t->desc.name, + global_ns ? "" : "non-", + sc->description ? sc->description : ""); + if (fmt && args) __bpftune_log(BPFTUNE_LOG_LEVEL, fmt, *args); - __bpftuner_tunable_stats_update(t, scenario, global_ns, 1); + } else if (fmt && args) { + __bpftune_log(LOG_DEBUG, fmt, *args); + } + __bpftuner_tunable_stats_update(t, scenario, global_ns, 1); } } diff --git a/src/neigh_table_tuner.c b/src/neigh_table_tuner.c index a04d1a2..112ba0b 100644 --- a/src/neigh_table_tuner.c +++ b/src/neigh_table_tuner.c @@ -57,8 +57,9 @@ static struct bpftunable_desc descs[] = { }; static struct bpftunable_scenario scenarios[] = { -{ NEIGH_TABLE_FULL, "neighbour table nearly full", - "neighbour table is nearly full, preventing new entries from being added." }, + BPFTUNABLE_SCENARIO(NEIGH_TABLE_FULL, + "neighbour table nearly full", + "neighbour table is nearly full, preventing new entries from being added."), }; int init(struct bpftuner *tuner) diff --git a/src/net_buffer_tuner.c b/src/net_buffer_tuner.c index 1f4b1da..acf45c6 100644 --- a/src/net_buffer_tuner.c +++ b/src/net_buffer_tuner.c @@ -26,14 +26,18 @@ static struct bpftunable_desc descs[] = { }; static struct bpftunable_scenario scenarios[] = { -{ NETDEV_MAX_BACKLOG_INCREASE, "need to increase max backlog size", - "Need to increase backlog size to prevent drops for faster connection" }, -{ FLOW_LIMIT_CPU_SET, "need to set per-cpu bitmap value", - "Need to set flow limit per-cpu to prioritize small flows" }, -{ NETDEV_BUDGET_INCREASE, "need to increase # of packets processed per NAPI poll", - "Need to increase number of packets processed across network devices during NAPI poll to use all of net.core.netdev_budget_usecs" }, -{ NETDEV_BUDGET_DECREASE, "need to decrease # of packets processed per NAPI poll", - "Need to decrease netdev_budget[_usecs] since the ratio of time spent waiting to run versus time spent running for tasks has increased as we have increased netdev budget. This indicates either our budget increases directly let to increased wait times for other tasks, or that general load has increased; either way spending too much time in NAPI processing will hurt system performance." 
}
+	BPFTUNABLE_SCENARIO(NETDEV_MAX_BACKLOG_INCREASE,
+			    "need to increase max backlog size",
+			    "Need to increase backlog size to prevent drops for faster connection"),
+	BPFTUNABLE_SCENARIO(FLOW_LIMIT_CPU_SET,
+			    "need to set per-cpu bitmap value",
+			    "Need to set flow limit per-cpu to prioritize small flows"),
+	BPFTUNABLE_SCENARIO(NETDEV_BUDGET_INCREASE,
+			    "need to increase # of packets processed per NAPI poll",
+			    "Need to increase number of packets processed across network devices during NAPI poll to use all of net.core.netdev_budget_usecs"),
+	BPFTUNABLE_SCENARIO(NETDEV_BUDGET_DECREASE,
+			    "need to decrease # of packets processed per NAPI poll",
+			    "Need to decrease netdev_budget[_usecs] since the ratio of time spent waiting to run versus time spent running for tasks has increased as we have increased netdev budget. This indicates either our budget increases directly led to increased wait times for other tasks, or that general load has increased; either way spending too much time in NAPI processing will hurt system performance."),
 };
 
 int init(struct bpftuner *tuner)
diff --git a/src/netns_tuner.c b/src/netns_tuner.c
index ac016bc..96a0113 100644
--- a/src/netns_tuner.c
+++ b/src/netns_tuner.c
@@ -33,8 +33,10 @@ static struct bpftunable_desc descs[] = {
 };
 
 static struct bpftunable_scenario scenarios[] = {
-{ NETNS_SCENARIO_CREATE, "netns created", "network namespace creation" },
-{ NETNS_SCENARIO_DESTROY, "netns destroyed", "network namespace destruction" },
+	BPFTUNABLE_SCENARIO(NETNS_SCENARIO_CREATE,
+			    "netns created", "network namespace creation"),
+	BPFTUNABLE_SCENARIO(NETNS_SCENARIO_DESTROY,
+			    "netns destroyed", "network namespace destruction"),
 };
 
 int init(struct bpftuner *tuner)
diff --git a/src/route_table_tuner.c b/src/route_table_tuner.c
index 268b0ce..cd12567 100644
--- a/src/route_table_tuner.c
+++ b/src/route_table_tuner.c
@@ -33,8 +33,9 @@ static struct bpftunable_desc descs[] = {
 };
 
 static struct bpftunable_scenario scenarios[] = {
-{ ROUTE_TABLE_FULL, "destination table nearly full",
-  "destination table is nearly full, preventing new entries from being added."
+	BPFTUNABLE_SCENARIO(NEIGH_TABLE_FULL,
+		"neighbour table nearly full",
+		"neighbour table is nearly full, preventing new entries from being added."),
 };
 
 int init(struct bpftuner *tuner)
diff --git a/src/net_buffer_tuner.c b/src/net_buffer_tuner.c
index 1f4b1da..acf45c6 100644
--- a/src/net_buffer_tuner.c
+++ b/src/net_buffer_tuner.c
@@ -26,14 +26,18 @@ static struct bpftunable_desc descs[] = {
 };
 
 static struct bpftunable_scenario scenarios[] = {
-{ NETDEV_MAX_BACKLOG_INCREASE, "need to increase max backlog size",
-  "Need to increase backlog size to prevent drops for faster connection" },
-{ FLOW_LIMIT_CPU_SET, "need to set per-cpu bitmap value",
-  "Need to set flow limit per-cpu to prioritize small flows" },
-{ NETDEV_BUDGET_INCREASE, "need to increase # of packets processed per NAPI poll",
-  "Need to increase number of packets processed across network devices during NAPI poll to use all of net.core.netdev_budget_usecs" },
-{ NETDEV_BUDGET_DECREASE, "need to decrease # of packets processed per NAPI poll",
-  "Need to decrease netdev_budget[_usecs] since the ratio of time spent waiting to run versus time spent running for tasks has increased as we have increased netdev budget. This indicates either our budget increases directly let to increased wait times for other tasks, or that general load has increased; either way spending too much time in NAPI processing will hurt system performance." },
+	BPFTUNABLE_SCENARIO(NETDEV_MAX_BACKLOG_INCREASE,
+		"need to increase max backlog size",
+		"Need to increase backlog size to prevent drops for faster connection"),
+	BPFTUNABLE_SCENARIO(FLOW_LIMIT_CPU_SET,
+		"need to set per-cpu bitmap value",
+		"Need to set flow limit per-cpu to prioritize small flows"),
+	BPFTUNABLE_SCENARIO(NETDEV_BUDGET_INCREASE,
+		"need to increase # of packets processed per NAPI poll",
+		"Need to increase number of packets processed across network devices during NAPI poll to use all of net.core.netdev_budget_usecs"),
+	BPFTUNABLE_SCENARIO(NETDEV_BUDGET_DECREASE,
+		"need to decrease # of packets processed per NAPI poll",
+		"Need to decrease netdev_budget[_usecs] since the ratio of time spent waiting to run versus time spent running for tasks has increased as we have increased netdev budget. This indicates either our budget increases directly led to increased wait times for other tasks, or that general load has increased; either way, spending too much time in NAPI processing will hurt system performance."),
 };
 
 int init(struct bpftuner *tuner)
diff --git a/src/netns_tuner.c b/src/netns_tuner.c
index ac016bc..96a0113 100644
--- a/src/netns_tuner.c
+++ b/src/netns_tuner.c
@@ -33,8 +33,10 @@ static struct bpftunable_desc descs[] = {
 };
 
 static struct bpftunable_scenario scenarios[] = {
-{ NETNS_SCENARIO_CREATE, "netns created", "network namespace creation" },
-{ NETNS_SCENARIO_DESTROY, "netns destroyed", "network namespace destruction" },
+	BPFTUNABLE_SCENARIO(NETNS_SCENARIO_CREATE,
+		"netns created", "network namespace creation"),
+	BPFTUNABLE_SCENARIO(NETNS_SCENARIO_DESTROY,
+		"netns destroyed", "network namespace destruction"),
 };
 
 int init(struct bpftuner *tuner)
diff --git a/src/route_table_tuner.c b/src/route_table_tuner.c
index 268b0ce..cd12567 100644
--- a/src/route_table_tuner.c
+++ b/src/route_table_tuner.c
@@ -33,8 +33,9 @@ static struct bpftunable_desc descs[] = {
 };
 
 static struct bpftunable_scenario scenarios[] = {
-{ ROUTE_TABLE_FULL, "destination table nearly full",
-  "destination table is nearly full, preventing new entries from being added." },
+	BPFTUNABLE_SCENARIO(ROUTE_TABLE_FULL,
+		"destination table nearly full",
+		"destination table is nearly full, preventing new entries from being added."),
 };
 
 int init(struct bpftuner *tuner)
diff --git a/src/tcp_buffer_tuner.c b/src/tcp_buffer_tuner.c
index 7627bcf..2f25f94 100644
--- a/src/tcp_buffer_tuner.c
+++ b/src/tcp_buffer_tuner.c
@@ -38,34 +38,45 @@ static struct bpftunable_desc descs[] = {
 };
 
 static struct bpftunable_scenario scenarios[] = {
-{ TCP_BUFFER_INCREASE, "need to increase TCP buffer size(s)",
-  "Need to increase buffer size(s) to maximize throughput" },
-{ TCP_BUFFER_DECREASE, "need to decrease TCP buffer size(s)",
-  "Need to decrease buffer size(s) to reduce memory utilization" },
-{ TCP_BUFFER_DECREASE_LATENCY,
+	BPFTUNABLE_SCENARIO(TCP_BUFFER_INCREASE,
+		"need to increase TCP buffer size(s)",
+		"Need to increase buffer size(s) to maximize throughput"),
+	BPFTUNABLE_SCENARIO(TCP_BUFFER_DECREASE,
+		"need to decrease TCP buffer size(s)",
+		"Need to decrease buffer size(s) to reduce memory utilization"),
+	BPFTUNABLE_SCENARIO(TCP_BUFFER_DECREASE_LATENCY,
 		"need to decrease TCP buffer size due to latency",
-  "Latency is starting to correlate with buffer size increases, so decrease buffer size to avoid this effect" },
-{ TCP_MEM_PRESSURE, "approaching TCP memory pressure",
-  "Since memory pressure/exhaustion are unstable system states, adjust tcp memory-related tunables" },
-{ TCP_MEM_EXHAUSTION, "approaching TCP memory exhaustion",
-  "Since memory exhaustion is a highly unstable state, adjust TCP memory-related tunables to avoid exhaustion" },
-{ TCP_MODERATE_RCVBUF_ENABLE, "match receive buffer size with throughput needs",
-  "Since we are tuning rcvbuf max size, ensure auto-tuning of rcvbuf size for the connection is enabled to pick optimal rcvbuf size" },
-{ TCP_LOW_MEM_ENTER_ENABLE, "set tunable on entering low-memory state",
-  "In low-memory situations, avoid activities like skb high order allocations, per-path TCP metric collection which can lead to overheads" },
-{ TCP_LOW_MEM_LEAVE_DISABLE, "unset tunable set earlier in low-memory state",
-  "Due to easing of memory strain, unset tunables to allow skb high order allocations, (re)-enable TCP metrics collection etc" },
-{ TCP_MAX_SYN_BACKLOG_INCREASE, "increase maximum syn backlog under load since syncookies are disabled",
-  "Due to the fact that syncookies are disabled and we are seeing a large number of legitimate-seeming TCP connections, increase TCP maximum SYN backlog queue length" },
-{ TCP_MAX_SYN_BACKLOG_DECREASE, "decrease maximum syn backlog due to large numbers of uncompleted connections",
-  "A large number of connection requests (SYNs) uncorrelated with connection establishment suggest a more cautious approach to handling pending connections to avoid Denial of Service attacks" },
-{ TCP_SYNCOOKIES_ENABLE, "enable syncookies as furthern SYN backlog increases do not help",
-  "SYN flood conditions have been detected, but further increases to SYN backlog are not advisable; try using syncookies instead" },
-{ TCP_SYNCOOKIES_DISABLE, "disable syncookies as they are ineffective",
-  "TCP syncookies are not effective; none have been validated successfully" },
-{ TCP_MAX_ORPHANS_INCREASE,
+		"Latency is starting to correlate with buffer size increases, so decrease buffer size to avoid this effect"),
+	BPFTUNABLE_SCENARIO(TCP_MEM_PRESSURE,
+		"approaching TCP memory pressure",
+		"Since memory pressure/exhaustion are unstable system states, adjust TCP memory-related tunables"),
+	BPFTUNABLE_SCENARIO(TCP_MEM_EXHAUSTION,
+		"approaching TCP memory exhaustion",
+		"Since memory exhaustion is a highly unstable state, adjust TCP memory-related tunables to avoid exhaustion"),
+	BPFTUNABLE_SCENARIO(TCP_MODERATE_RCVBUF_ENABLE,
+		"match receive buffer size with throughput needs",
+		"Since we are tuning rcvbuf max size, ensure auto-tuning of rcvbuf size for the connection is enabled to pick optimal rcvbuf size"),
+	BPFTUNABLE_SCENARIO(TCP_LOW_MEM_ENTER_ENABLE,
+		"set tunable on entering low-memory state",
+		"In low-memory situations, avoid activities like skb high-order allocations and per-path TCP metric collection, which can lead to overheads"),
+	BPFTUNABLE_SCENARIO(TCP_LOW_MEM_LEAVE_DISABLE,
+		"unset tunable set earlier in low-memory state",
+		"Due to easing of memory strain, unset tunables to allow skb high-order allocations, (re)-enable TCP metrics collection, etc."),
+	BPFTUNABLE_SCENARIO(TCP_MAX_SYN_BACKLOG_INCREASE,
+		"increase maximum syn backlog under load since syncookies are disabled",
+		"Due to the fact that syncookies are disabled and we are seeing a large number of legitimate-seeming TCP connections, increase TCP maximum SYN backlog queue length"),
+	BPFTUNABLE_SCENARIO(TCP_MAX_SYN_BACKLOG_DECREASE,
+		"decrease maximum syn backlog due to large numbers of uncompleted connections",
+		"A large number of connection requests (SYNs) uncorrelated with connection establishment suggests a more cautious approach to handling pending connections to avoid Denial of Service attacks"),
+	BPFTUNABLE_SCENARIO(TCP_SYNCOOKIES_ENABLE,
+		"enable syncookies as further SYN backlog increases do not help",
+		"SYN flood conditions have been detected, but further increases to SYN backlog are not advisable; try using syncookies instead"),
+	BPFTUNABLE_SCENARIO(TCP_SYNCOOKIES_DISABLE,
+		"disable syncookies as they are ineffective",
+		"TCP syncookies are not effective; none have been validated successfully"),
+	BPFTUNABLE_SCENARIO(TCP_MAX_ORPHANS_INCREASE,
 		"increase max number of orphaned sockets",
-  "" },
+		""),
 };
 
 /* When TCP starts up, it calls nr_free_buffer_pages() and uses it to estimate
diff --git a/src/tcp_conn_tuner.c b/src/tcp_conn_tuner.c
index b9032cb..1dbdab4 100644
--- a/src/tcp_conn_tuner.c
+++ b/src/tcp_conn_tuner.c
@@ -45,8 +45,9 @@ static struct bpftunable_desc descs[] = {
 };
 
 static struct bpftunable_scenario scenarios[] = {
-{ TCP_CONG_SET, "specify TCP congestion control algorithm",
-  "To optimize TCP performance, a TCP congestion control algorithm was chosen to mimimize round-trip time and maximize delivery rate." },
+	BPFTUNABLE_SCENARIO(TCP_CONG_SET,
+		"specify TCP congestion control algorithm",
+		"To optimize TCP performance, a TCP congestion control algorithm was chosen to minimize round-trip time and maximize delivery rate."),
 };
 
 struct tcp_conn_tuner_bpf *skel;
diff --git a/src/udp_buffer_tuner.c b/src/udp_buffer_tuner.c
index a2e1ca8..6cf7cdf 100644
--- a/src/udp_buffer_tuner.c
+++ b/src/udp_buffer_tuner.c
@@ -24,14 +24,18 @@ static struct bpftunable_desc descs[] = {
 };
 
 static struct bpftunable_scenario scenarios[] = {
-{ UDP_BUFFER_INCREASE, "need to increase UDP buffer size(s)",
-  "Need to increase buffer size(s) to maximize throughput and reduce loss" },
-{ UDP_BUFFER_DECREASE, "need to decrease UDP buffer size(s)",
-  "Need to decrease buffer size(s) to reduce memory utilization" },
-{ UDP_MEM_PRESSURE, "approaching UDP memory pressure",
-  "Since memory pressure/exhaustion are unstable system states, adjust UDP memory-related tunables" },
-{ UDP_MEM_EXHAUSTION, "approaching UDP memory exhaustion",
-  "Since memory exhaustion is a highly unstable state, adjust UDP memory-related tunables to avoid exhaustion" },
+	BPFTUNABLE_SCENARIO(UDP_BUFFER_INCREASE,
+		"need to increase UDP buffer size(s)",
+		"Need to increase buffer size(s) to maximize throughput and reduce loss"),
+	BPFTUNABLE_SCENARIO(UDP_BUFFER_DECREASE,
+		"need to decrease UDP buffer size(s)",
+		"Need to decrease buffer size(s) to reduce memory utilization"),
+	BPFTUNABLE_SCENARIO(UDP_MEM_PRESSURE,
+		"approaching UDP memory pressure",
+		"Since memory pressure/exhaustion are unstable system states, adjust UDP memory-related tunables"),
+	BPFTUNABLE_SCENARIO(UDP_MEM_EXHAUSTION,
+		"approaching UDP memory exhaustion",
+		"Since memory exhaustion is a highly unstable state, adjust UDP memory-related tunables to avoid exhaustion"),
 };
 
 /* When UDP starts up, it calls nr_free_buffer_pages() and uses it to estimate