infra/nix/modules/sysctl.nix

148 lines
5.9 KiB
Nix
Raw Permalink Normal View History

2023-10-15 22:16:06 +02:00
{...}: {
  # Kernel sysctl tuning: network throughput/latency, connection-table sizing,
  # and a handful of hardening knobs. Values apply at boot via boot.kernel.sysctl.
  boot.kernel.sysctl = {
    # Reboot 60 seconds after a kernel panic instead of hanging forever.
    "kernel.panic" = 60;
    # Strongly prefer keeping pages in RAM; swap only under real pressure.
    "vm.swappiness" = 2;
    #"vm.vfs_cache_pressure" = 80;
    # Allow routing between interfaces (containers / VPN gateway use).
    "net.ipv4.ip_forward" = 1;
    "net.ipv6.conf.all.forwarding" = 1;
    #"net.ipv4.tcp_window_scaling" = 0;
    # as per https://wiki.archlinux.org/index.php/Sysctl#Improving_performance
    "net.core.rmem_default" = 1048576;
    "net.core.rmem_max" = 16777216;
    # "net.core.rmem_max" = 268435456;
    "net.core.wmem_default" = 1048576;
    "net.core.wmem_max" = 16777216;
    # "net.core.wmem_max" = 268435456;
    "net.core.optmem_max" = 65536;
    # https://unix.stackexchange.com/a/471951
    #
    # "net.ipv4.tcp_rmem" = "4096 87380 20097152";
    # "net.ipv4.tcp_wmem" = "4096 65536 16777216";
    "net.ipv4.tcp_rmem" = "4096 87380 134217728";
    "net.ipv4.tcp_wmem" = "4096 65536 134217728";
    "net.ipv4.udp_rmem_min" = 8192;
    "net.ipv4.udp_wmem_min" = 8192;
    # TCP Fast Open is an extension to the transmission control protocol (TCP) that
    # helps reduce network latency by enabling data to be exchanged during the
    # sender's initial TCP SYN. Using the value 3 instead of the default 1 allows
    # TCP Fast Open for both incoming and outgoing connections.
    "net.ipv4.tcp_fastopen" = 3;
    # tcp_max_tw_buckets is the maximum number of sockets in TIME_WAIT state.
    # After reaching this number the system will start destroying sockets that
    # are in this state. Increase this to prevent simple DOS attacks.
    "net.ipv4.tcp_max_tw_buckets" = 2000000;
    # tcp_tw_reuse sets whether TCP should reuse an existing connection in the
    # TIME-WAIT state for a new outgoing connection if the new timestamp is
    # strictly bigger than the most recent timestamp recorded for the previous
    # connection.
    # This helps avoid running out of available network sockets.
    "net.ipv4.tcp_tw_reuse" = 1;
    # With the following settings, applications detect dead TCP connections
    # after 120 seconds (60s idle + 6 probes x 10s interval).
    "net.ipv4.tcp_keepalive_time" = 60;
    "net.ipv4.tcp_keepalive_intvl" = 10;
    "net.ipv4.tcp_keepalive_probes" = 6;
    # Reverse-path filtering in loose mode (2) — compatible with asymmetric
    # routing setups, unlike strict mode (1).
    "net.ipv4.conf.default.rp_filter" = 2;
    "net.ipv4.conf.all.rp_filter" = 2;
    # Log packets with impossible source addresses.
    "net.ipv4.conf.default.log_martians" = 1;
    "net.ipv4.conf.all.log_martians" = 1;
    # Route cache is full: consider increasing sysctl net.ipv6.route.max_size
    # net.ipv6.route.max_size = 8192;
    "net.ipv6.route.max_size" = 65536;
    # https://developer.akamai.com/blog/2012/09/27/linux-tcpip-tuning-scalability
    "net.ipv4.ip_local_port_range" = "18000 65535";
    #"net.netfilter.nf_conntrack_tcp_timeout_time_wait" = 30;
    "net.netfilter.nf_conntrack_tcp_timeout_time_wait" = 60;
    "net.netfilter.nf_conntrack_tcp_timeout_established" = 600;
    # Don't collapse the congestion window after an idle period.
    "net.ipv4.tcp_slow_start_after_idle" = 0;
    # Don't cache ssthresh/metrics from previous connections.
    "net.ipv4.tcp_no_metrics_save" = 1;
    # doesn't work on arch with Zen, works on fedora with XanMod.
    "net.core.default_qdisc" = "fq";
    # failed to initialize inotify - default value here was 128
    "fs.inotify.max_user_instances" = 256;
    "net.ipv4.tcp_window_scaling" = 1;
    # The longer the maximum transmission unit (MTU) the better for performance,
    # but the worse for reliability. This is because a lost packet means more data
    # to be retransmitted and because many routers on the Internet cannot deliver
    # very long packets.
    "net.ipv4.tcp_mtu_probing" = 1;
    # Sync disk when dirty buffers reach 6% of memory.
    "vm.dirty_ratio" = 6;
    "kernel.numa_balancing" = 1;
    "net.core.netdev_max_backlog" = 250000;
    # tcp_max_syn_backlog is the maximum queue length of pending connections
    # 'Waiting Acknowledgment'. In the event of a synflood DOS attack, this queue
    # can fill up pretty quickly, at which point TCP SYN cookies will kick in
    # allowing your system to continue to respond to legitimate traffic, and
    # allowing you to gain access to block malicious IPs. If the server suffers
    # from overloads at peak times, you may want to increase this value a little
    # bit.
    "net.ipv4.tcp_max_syn_backlog" = 8192;
    # TCP SYN cookie protection
    # Helps protect against SYN flood attacks. Only kicks in when
    # net.ipv4.tcp_max_syn_backlog is reached. More details at, for example, [6].
    # As of linux 5.10, it is set by default.
    "net.ipv4.tcp_syncookies" = 1;
    # Protect against tcp time-wait assassination hazards, drop RST packets for
    # sockets in the time-wait state. Not widely supported outside of Linux, but
    # conforms to RFC.
    "net.ipv4.tcp_rfc1337" = 1;
    # Specify how many seconds to wait for a final FIN packet before the socket is
    # forcibly closed. This is strictly a violation of the TCP specification, but
    # required to prevent denial-of-service attacks. In Linux 2.2, the default
    # value was 180.
    "net.ipv4.tcp_fin_timeout" = 30;
    # When an attacker is trying to exploit the local kernel, it is often
    # helpful to be able to examine where in memory the kernel, modules,
    # and data structures live. As such, kernel addresses should be treated
    # as sensitive information.
    #
    # Many files and interfaces contain these addresses (e.g. /proc/kallsyms,
    # /proc/modules, etc), and this setting can censor the addresses. A value
    # of "0" allows all users to see the kernel addresses. A value of "1"
    # limits visibility to the root user, and "2" blocks even the root user.
    "kernel.kptr_restrict" = 1;
    # Mitigate JIT spraying attacks from unprivileged users.
    "net.core.bpf_jit_harden" = 1;
    # NOTE(review): 0 here *allows* unprivileged users to load BPF programs,
    # which contradicts the apparent hardening intent ("disallow regular users
    # to run BPF programs"). Set to 1 (or 2, which also blocks CAP_BPF) to
    # actually restrict — first confirm nothing (e.g. rootless container
    # tooling below) relies on unprivileged BPF.
    "kernel.unprivileged_bpf_disabled" = 0;
    # fs.protected_* mitigate symlink/hardlink/FIFO/regular-file attacks in
    # world-writable sticky directories like /tmp.
    "fs.protected_fifos" = 1;
    "fs.protected_symlinks" = 1;
    "fs.protected_hardlinks" = 1;
    "fs.protected_regular" = 2;
    # Full address-space layout randomisation.
    "kernel.randomize_va_space" = 2;
    # FIX: key previously contained a trailing space ("kernel.pid_max "), so
    # the intended sysctl was never applied — sysctl names must match exactly.
    "kernel.pid_max" = 4194304;
    # ad rootless podman
    "user.max_user_namespaces" = 15000;
    # Allow unprivileged ICMP echo (ping) for GIDs 0-2000000.
    "net.ipv4.ping_group_range" = "0 2000000";
  };
}