{
  config,
  lib,
  pkgs,
  ...
}:
let
  # TODO: delegate to a dedicated recv user; see: https://klarasystems.com/articles/improving-replication-security-with-openzfs-delegation/
  tailnet = "tail530c7.ts.net";

  syncoidPull-t14 =
    with [
      pkgs.openssh
      pkgs.networkmanager
      pkgs.zfs
    ];
    pkgs.writeShellScript "syncoid-pull-t14.sh" ''
      #!/usr/bin/env bash
      set -xeuo pipefail # fail fast and be verbose

      tailnet="${tailnet}"
      _h="t14"
      h="$_h.$tailnet"
      u="root"
      # k="~/.ssh/syncoider"
      k="${config.sops.secrets.syncoider-priv.path}"

      alias ssh="${pkgs.openssh}/bin/ssh -i $k -oStrictHostKeyChecking=accept-new"
      # alias syncoid=${pkgs.sanoid}/bin/syncoid

      ### pre start
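      # ask the remote host (via nmcli) which NetworkManager connections are
      # active, ignoring virtual interfaces; if one of the excluded networks
      # (presumably metered or otherwise unsuitable) is up, skip this run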
      _conns="$(${pkgs.openssh}/bin/ssh -i $k -oStrictHostKeyChecking=accept-new $u@$h nmcli -g Name conn show --active | grep -v "docker0\|virbr0\|br-\|lo")"
      declare -a _exc=(
        BRITT
        HausLenzGuest
        HONOR
        hack.lu
        internet
      )

      # check networks
      for _conn in "''${_exc[@]}"; do
        (echo "$_conns" | grep -q -E "$_conn" && echo "connection $_conn is active, skipping syncoid pull now") && exit 0
      done
      ### pre end

      _srcpool="zroot"
      _dstpool="z0"
      # _dstpool="vault"
      _dstparent="backups/snaps"

      declare -a datasets=(
        userdata
        system
      )
      for _dataset in "''${datasets[@]}"; do
        #_dataset="userdata"
        src="$_srcpool/$_dataset"
        dst="$_dstpool/$_dstparent/$_h/$_dataset"
        ${pkgs.sanoid}/bin/syncoid \
          --no-privilege-elevation \
          --no-sync-snap \
          --preserve-recordsize --mbuffer-size=128M \
          --compress=zstd-fast \
          --no-clone-handling \
          --recursive \
          --sendoptions="wpV" \
          --use-hold \
          --sshkey=$k \
          $u@$h:$src $dst
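        ## (disabled) push a JSON inventory of the replicated snapshots back to
        ## the source host after a successful sync: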
        ## $u@$h:$src $dst \
        ## && \
        ## \
        ## zfs list -Hpo creation,name,used -t snapshot -r $dst -s creation |\
        ##   sed 's:\t\([^@]*\)@:\t\1\t:' |\
        ##   column -J --table-columns creation,dataset,name,size -s $'\t' --table-name 'snapshots' |\
        ##   ssh -i $k $u@$h 'cat > /var/status-syncoider-$_dataset.json'
      done

      ### _dataset="system"
      ### src="$_srcpool/$_dataset"
      ### dst="$_dstpool/$_dstparent/$_h/$_dataset"
      ###
      ### syncoid \
      ###   --no-privilege-elevation \
      ###   --no-sync-snap \
      ###   --recursive \
      ###   --sendoptions=-w \
      ###   --preserve-recordsize --mbuffer-size=128M \
      ###   --compress=zstd-fast \
      ###   --sshkey=$k \
      ###   $u@$h:$src $dst \
      ###   && \
      ###   \
      ###   zfs list -Hpo creation,name,used -t snapshot -r $dst -s creation |\
      ###   sed 's:\t\([^@]*\)@:\t\1\t:' |\
      ###   column -J --table-columns creation,dataset,name,size -s $'\t' --table-name 'snapshots' |\
      ###   ssh -i $k $u@$h "cat > /var/status-syncoider-$_dataset.json"
    '';

  syncoidPull-loki =
    with [
      pkgs.openssh
      pkgs.zfs
    ];
    pkgs.writeShellScript "syncoid-pull-loki.sh" ''
      #!/usr/bin/env bash
      set -xeuo pipefail # fail fast and be verbose

      tailnet="${tailnet}"
      _h="loki"
      h="$_h.$tailnet"
      u="root"
      # k="~/.ssh/syncoider"
      k="${config.sops.secrets.syncoider-priv.path}"

      alias ssh="${pkgs.openssh}/bin/ssh -i $k -oStrictHostKeyChecking=accept-new"
      # alias syncoid=${pkgs.sanoid}/bin/syncoid

      _srcpool="zroot"
      _dstpool="z0"
      _dstparent="backups/snaps"
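      # one ZFS dataset per service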

      declare -a datasets=(
        atuin
        attic
        vaultwarden
        uptime-kuma
        loki
        shiori
        postgresqlBackup
        postgresql
        open-webui
        hass
        nextcloud
        prometheus
        netbox
        audiobookshelf
        gonic
        jellyfin
        paperless
      )

      for _dataset in "''${datasets[@]}"; do
        src="$_srcpool/DATA/services/$_dataset"
        dst="$_dstpool/$_dstparent/$_h/$_dataset"
        ${pkgs.sanoid}/bin/syncoid \
          --no-privilege-elevation \
          --no-sync-snap \
          --preserve-recordsize --mbuffer-size=128M \
          --compress=zstd-fast \
          --no-clone-handling \
          --recursive \
          --sendoptions="wpV" \
          --use-hold \
          --sshkey=$k \
          $u@$h:$src $dst
      done

      ### $u@$h:$src $dst \
      ### && \
      ### \
      ### zfs list -Hpo creation,name,used -t snapshot -r $dst -s creation |\
      ### sed 's:\t\([^@]*\)@:\t\1\t:' |\
      ### column -J --table-columns creation,dataset,name,size -s $'\t' --table-name 'snapshots' |\
      ### ssh -i $k $u@$h 'cat > /var/status-syncoider-DATA--services--$_dataset.json'

      #${pkgs.zfs}/bin/zfs list -Hpo creation,name,used -t snapshot -r $dst -s creation |\
      # sed 's:\t\([^@]*\)@:\t\1\t:' |\
      # column -J --table-columns creation,dataset,name,size -s $'\t' --table-name 'snapshots' |\
      # ssh -i $k $u@$h 'cat > /var/status-syncoider-DATA--services--$_dataset.json'
      # done
    '';
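
  # caelum: same pull as t14 (minus the network check), but its root pool is zr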
  syncoidPull-caelum =
    with [
      pkgs.openssh
      pkgs.zfs
    ];
    pkgs.writeShellScript "syncoid-pull-caelum.sh" ''
      #!/usr/bin/env bash
      set -xeuo pipefail # fail fast and be verbose

      tailnet="${tailnet}"
      _h="caelum"
      h="$_h.$tailnet"
      u="root"
      # k="~/.ssh/syncoider"
      k="${config.sops.secrets.syncoider-priv.path}"

      alias ssh="${pkgs.openssh}/bin/ssh -i $k -oStrictHostKeyChecking=accept-new"
      # alias syncoid=${pkgs.sanoid}/bin/syncoid

      _srcpool="zr"
      _dstpool="z0"
      # _dstpool="vault"
      _dstparent="backups/snaps"

      declare -a datasets=(
        userdata
        system
      )
      for _dataset in "''${datasets[@]}"; do
        src="$_srcpool/$_dataset"
        dst="$_dstpool/$_dstparent/$_h/$_dataset"
        ${pkgs.sanoid}/bin/syncoid \
          --no-privilege-elevation \
          --no-sync-snap \
          --preserve-recordsize --mbuffer-size=128M \
          --compress=zstd-fast \
          --no-clone-handling \
          --recursive \
          --sendoptions="wpV" \
          --use-hold \
          --sshkey=$k \
          $u@$h:$src $dst
      done
    '';

  syncoidPull-wyse =
    with [
      pkgs.openssh
      pkgs.zfs
    ];
    pkgs.writeShellScript "syncoid-pull-wyse.sh" ''
      #!/usr/bin/env bash
      set -xeuo pipefail # fail fast and be verbose

      tailnet="${tailnet}"
      _h="wyse"
      h="$_h.$tailnet"
      u="root"
      k="${config.sops.secrets.syncoider-priv.path}"

      alias ssh="${pkgs.openssh}/bin/ssh -i $k -oStrictHostKeyChecking=accept-new"

      _srcpool="zroot"
      _dstpool="z0"
      _dstparent="backups/snaps"

      declare -a datasets=(
        userdata
        system
      )
      for _dataset in "''${datasets[@]}"; do
        src="$_srcpool/$_dataset"
        dst="$_dstpool/$_dstparent/$_h/$_dataset"
        ${pkgs.sanoid}/bin/syncoid \
          --no-privilege-elevation \
          --no-sync-snap \
          --preserve-recordsize --mbuffer-size=128M \
          --compress=zstd-fast \
          --no-clone-handling \
          --recursive \
          --sendoptions="wpV" \
          --use-hold \
          --sshkey=$k \
          $u@$h:$src $dst
      done
    '';
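
  # same pull once more, with the hostname factored into a let binding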
  syncoidPull-nixurtur =
    let
      _host = "nixurtur";
    in
    with [
      pkgs.openssh
      pkgs.zfs
    ];
    pkgs.writeShellScript "syncoid-pull-${_host}.sh" ''
      #!/usr/bin/env bash
      set -xeuo pipefail # fail fast and be verbose

      tailnet="${tailnet}"
      _h="${_host}"
      h="$_h.$tailnet"
      u="root"
      k="${config.sops.secrets.syncoider-priv.path}"

      alias ssh="${pkgs.openssh}/bin/ssh -i $k -oStrictHostKeyChecking=accept-new"

      _srcpool="zroot"
      _dstpool="z0"
      _dstparent="backups/snaps"

      declare -a datasets=(
        userdata
        system
      )
      for _dataset in "''${datasets[@]}"; do
        src="$_srcpool/$_dataset"
        dst="$_dstpool/$_dstparent/$_h/$_dataset"
        ${pkgs.sanoid}/bin/syncoid \
          --no-privilege-elevation \
          --no-sync-snap \
          --preserve-recordsize --mbuffer-size=128M \
          --compress=zstd-fast \
          --no-clone-handling \
          --recursive \
          --sendoptions="wpV" \
          --use-hold \
          --sshkey=$k \
          $u@$h:$src $dst
      done
    '';
in
{
  sops.secrets = {
    syncoider-priv = {
      # path = "/root/.ssh/syncoider";
      mode = "0500";
    };
  };
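
  # one oneshot service plus timer per source host; each unit waits for
  # local filesystems and the sops-nix secret before pulling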
  # imports = [./systemd.nix];
  systemd = {
    services = {
      "syncoid-pull-t14" = {
        unitConfig = {
          Description = "syncoid pull service for t14";
          Requires = "local-fs.target";
          After = [
            "local-fs.target"
            "sops-nix.service"
          ];
          RequiresMountsFor = [
            "/backups/snaps/t14"
            config.sops.secrets.syncoider-priv.path
          ];
        };
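        # Restart=on-abnormal only retries after signals, timeouts or the
        # watchdog, not after a non-zero exit; scheduling is the timer's job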
        serviceConfig = {
          Type = "oneshot";
          ExecStart = syncoidPull-t14;
          BindReadOnlyPaths = [
            config.sops.secrets.syncoider-priv.path
          ];
          WorkingDirectory = "/";
          Restart = "on-abnormal";
          RestartSec = 60;
          Environment = "HOME=/root";
          # TimeoutStartSec="30m";
          TimeoutStopSec = "15";
        };
      };

      "syncoid-pull-loki" = {
        unitConfig = {
          Description = "syncoid pull service for loki";
          Requires = "local-fs.target";
          After = [
            "local-fs.target"
            "sops-nix.service"
          ];
          RequiresMountsFor = [
            "/backups/snaps/loki"
            config.sops.secrets.syncoider-priv.path
          ];
        };
        serviceConfig = {
          Type = "oneshot";
          ExecStart = syncoidPull-loki;
          BindReadOnlyPaths = [
            config.sops.secrets.syncoider-priv.path
          ];
          WorkingDirectory = "/";
          Restart = "on-abnormal";
          RestartSec = 60;
          Environment = "HOME=/root";
          # TimeoutStartSec="1h";
          TimeoutStopSec = "15";
        };
      };

      "syncoid-pull-caelum" = {
        unitConfig = {
          Description = "syncoid pull service for caelum";
          Requires = "local-fs.target";
          After = [
            "local-fs.target"
            "sops-nix.service"
          ];
          RequiresMountsFor = [
            "/backups/snaps/caelum"
            config.sops.secrets.syncoider-priv.path
          ];
        };
        serviceConfig = {
          Type = "oneshot";
          ExecStart = syncoidPull-caelum;
          BindReadOnlyPaths = [
            config.sops.secrets.syncoider-priv.path
          ];
          WorkingDirectory = "/";
          Restart = "on-abnormal";
          RestartSec = 60;
          Environment = "HOME=/root";
          TimeoutStopSec = "15";
        };
      };

      "syncoid-pull-wyse" = {
        unitConfig = {
          Description = "syncoid pull service for wyse";
          Requires = "local-fs.target";
          After = [
            "local-fs.target"
            "sops-nix.service"
          ];
          RequiresMountsFor = [
            "/backups/snaps"
            config.sops.secrets.syncoider-priv.path
          ];
        };
        serviceConfig = {
          Type = "oneshot";
          ExecStart = syncoidPull-wyse;
          BindReadOnlyPaths = [
            config.sops.secrets.syncoider-priv.path
          ];
          WorkingDirectory = "/";
          Restart = "on-abnormal";
          RestartSec = 60;
          Environment = "HOME=/root";
          TimeoutStopSec = "15";
        };
      };

      "syncoid-pull-nixurtur" = {
        unitConfig = {
          Description = "syncoid pull service for nixurtur";
          Requires = "local-fs.target";
          After = [
            "local-fs.target"
            "sops-nix.service"
          ];
          RequiresMountsFor = [
            "/backups/snaps"
            config.sops.secrets.syncoider-priv.path
          ];
        };
        serviceConfig = {
          Type = "oneshot";
          ExecStart = syncoidPull-nixurtur;
          BindReadOnlyPaths = [
            config.sops.secrets.syncoider-priv.path
          ];
          WorkingDirectory = "/";
          Restart = "on-abnormal";
          RestartSec = 60;
          Environment = "HOME=/root";
          TimeoutStopSec = "15";
        };
      };

      #"syncoid-pull@" = {
      #  unitConfig = {
      #    Description = "syncoid pull service for %I";
      #    Requires = "local-fs.target";
      #    After = "local-fs.target";
      #  };
      #  serviceConfig = {
      #    Type="oneshot";
      #    ExecStart="/usr/local/bin/syncoid-pull-%I";
      #    # ; WorkingDirectory=/opt/syncoid-pull/;
      #    WorkingDirectory="/";
      #    Restart="on-failure";
      #    RestartSec=60;
      #    Environment="HOME=/root";
      #    TimeoutStartSec="1h";
      #  };
      #};
    };

    timers = {
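      # OnActiveSec fires once, 10 minutes after the timer comes up (i.e. after
      # boot); OnCalendar sets the steady schedule, Persistent=true catches up
      # on runs missed while powered off, and RandomizedDelaySec spreads load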
      "syncoid-pull-t14" = {
        unitConfig.Description = "frequent syncoid pull timer for t14";
        timerConfig = {
          # OnCalendar = "*:0/15";
          # OnCalendar = "*:0/45";
          OnActiveSec = 600;
          OnCalendar = "hourly";
          Persistent = "true";
          RandomizedDelaySec = "30s";
        };
        wantedBy = [ "timers.target" ];
      };
      "syncoid-pull-loki" = {
        unitConfig.Description = "frequent syncoid pull timer for loki";
        timerConfig = {
          OnActiveSec = 600;
          OnCalendar = "00/1:00"; # hourly
          Persistent = "true";
          RandomizedDelaySec = "1m";
        };
        wantedBy = [ "timers.target" ];
      };
      "syncoid-pull-caelum" = {
        unitConfig.Description = "frequent syncoid pull timer for caelum";
        timerConfig = {
          OnActiveSec = 600;
          OnCalendar = "00/4:00"; # every 4 hours
          Persistent = "true";
          RandomizedDelaySec = "3m";
        };
        wantedBy = [ "timers.target" ];
      };
      "syncoid-pull-wyse" = {
        unitConfig.Description = "frequent syncoid pull timer for wyse";
        timerConfig = {
          OnActiveSec = 600;
          OnCalendar = "00/1:00"; # every hour
          Persistent = "true";
          RandomizedDelaySec = "3m";
        };
        wantedBy = [ "timers.target" ];
      };
      "syncoid-pull-nixurtur" = {
        unitConfig.Description = "frequent syncoid pull timer for nixurtur";
        timerConfig = {
          OnActiveSec = 600;
          OnCalendar = "00/2:00"; # every 2 hours
          Persistent = "true";
          RandomizedDelaySec = "1m";
        };
        wantedBy = [ "timers.target" ];
      };

      #"syncoid-pull@-" = {
      #  unitConfig.Description = "frequent syncoid pull timer for %I";
      #  timerConfig = {
      #    # OnCalendar = "*:0/15";
      #    OnCalendar = "*:0/45";
      #    Persistent = "true";
      #    RandomizedDelaySec = "30s";
      #  };
      #  wantedBy = [ "timers.target" ];
      #};
    };
  };
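
  # pin the host keys of the pull sources; together with
  # StrictHostKeyChecking=accept-new above this prevents silent key changes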
  programs.ssh = {
    knownHosts = {
      "caelum.tail530c7.ts.net".publicKey =
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEV8najFNdarW6alxh/Gy07BMItwM837tEip4wF2oFGp";
      "loki.tail530c7.ts.net".publicKey =
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINTtO1CywQTaP5FhzdN2D+aU7U/KRt8Vy/LPymZBB5dP";
    };
  };
}