Diffstat (limited to 'hosts/vidhar')
-rw-r--r--  hosts/vidhar/borg/borgsnap/borgsnap/__main__.py | 202
-rw-r--r--  hosts/vidhar/borg/borgsnap/setup.py             |  10
-rwxr-xr-x  hosts/vidhar/borg/copy.py                       |   4
-rw-r--r--  hosts/vidhar/borg/default.nix                   |  92
-rw-r--r--  hosts/vidhar/default.nix                        |  39
-rw-r--r--  hosts/vidhar/prometheus/default.nix             |  50
-rw-r--r--  hosts/vidhar/zfs.nix                            |  16
7 files changed, 363 insertions(+), 50 deletions(-)
diff --git a/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py b/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py
new file mode 100644
index 00000000..e93e6a60
--- /dev/null
+++ b/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py
@@ -0,0 +1,202 @@
+import argparse
+import os, sys, signal
+from pyprctl import cap_permitted, cap_inheritable, cap_effective, cap_ambient, Cap
+from pwd import getpwnam
+
+from datetime import datetime, timezone
+from dateutil.parser import isoparse
+
+from xdg import xdg_runtime_dir
+import unshare
+from tempfile import TemporaryDirectory
+
+import logging
+
+import json
+import subprocess
+
+import pathlib
+from pathlib import Path
+
+from atomicwrites import atomic_write
+
+from traceback import format_exc
+
+
+borg_pwd = getpwnam('borg')
+
+def as_borg(caps=set(), cwd=None):
+    if caps:
+        cap_permitted.add(*caps)
+        cap_inheritable.add(*caps)
+        cap_effective.add(*caps)
+        cap_ambient.add(*caps)
+
+    os.setgid(borg_pwd.pw_gid)
+    os.setuid(borg_pwd.pw_uid)
+
+    if cwd is not None:
+        os.chdir(cwd)
+
+
+def _archive_name(snapshot, target, archive_prefix):
+    _, _, ts = snapshot.rpartition('@')
+    creation_time = isoparse(ts).astimezone(timezone.utc)
+    archive_name = _archive_basename(snapshot, archive_prefix)
+    return f'{target}::{archive_name}-{creation_time.strftime("%Y-%m-%dT%H:%M:%S")}'
+
+def _archive_basename(snapshot, archive_prefix):
+    base_name, _, _ = snapshot.rpartition('@')
+    return archive_prefix + base_name.replace('-', '--').replace('/', '-')
+
+def check(*, snapshot, target, archive_prefix, cache_file):
+    archives = None
+    if cache_file:
+        logger.debug('Trying cache...')
+        try:
+            with open(cache_file, mode='r', encoding='utf-8') as fp:
+                archives = set(json.load(fp))
+            logger.info('Loaded archive list from cache')
+        except FileNotFoundError:
+            pass
+
+    if not archives:
+        logger.info('Loading archive list from remote...')
+        with subprocess.Popen(['borg', 'list', '--info', '--lock-wait=600', '--json', target], stdout=subprocess.PIPE, preexec_fn=lambda: as_borg()) as proc:
+            archives = set([archive['barchive'] for archive in json.load(proc.stdout)['archives']])
+        if cache_file:
+            logger.debug('Saving archive list to cache...')
+            with atomic_write(cache_file, mode='w', encoding='utf-8', overwrite=True) as fp:
+                json.dump(list(archives), fp)
+
+    # logger.debug(f'archives: {archives}')
+    _, _, archive_name = _archive_name(snapshot, target, archive_prefix).partition('::')
+    if archive_name in archives:
+        logger.info(f'{archive_name} found')
+        return 0
+    else:
+        logger.info(f'{archive_name} not found')
+        return 126
+
+def create(*, snapshot, target, archive_prefix, dry_run):
+    basename = _archive_basename(snapshot, archive_prefix)
+
+    with TemporaryDirectory(prefix=f'borg-mount_{basename}_', dir=os.environ.get('RUNTIME_DIRECTORY')) as tmpdir:
+        child = os.fork()
+        if child == 0:
+            unshare.unshare(unshare.CLONE_NEWNS)
+            subprocess.run(['mount', '--make-rprivate', '/'], check=True)
+            chroot = pathlib.Path(tmpdir) / 'chroot'
+            upper = pathlib.Path(tmpdir) / 'upper'
+            work = pathlib.Path(tmpdir) / 'work'
+            for path in [chroot,upper,work]:
+                path.mkdir()
+            subprocess.run(['mount', '-t', 'overlay', 'overlay', '-o', f'lowerdir=/,upperdir={upper},workdir={work}', chroot], check=True)
+            bindMounts = ['nix', 'run', 'run/secrets.d', 'run/wrappers', 'proc', 'dev', 'sys', pathlib.Path(os.path.expanduser('~')).relative_to('/')]
+            if os.environ.get('BORG_BASE_DIR'):
+                bindMounts.append(pathlib.Path(os.environ['BORG_BASE_DIR']).relative_to('/'))
+            if 'SSH_AUTH_SOCK' in os.environ:
+                bindMounts.append(pathlib.Path(os.environ['SSH_AUTH_SOCK']).parent.relative_to('/'))
+            for bindMount in bindMounts:
+                (chroot / bindMount).mkdir(parents=True,exist_ok=True)
+                # print(*['mount', '--bind', pathlib.Path('/') / bindMount, chroot / bindMount], file=stderr)
+                subprocess.run(['mount', '--bind', pathlib.Path('/') / bindMount, chroot / bindMount], check=True)
+            os.chroot(chroot)
+            os.chdir('/')
+            dir = pathlib.Path('/borg')
+            dir.mkdir(parents=True,exist_ok=True,mode=0o0750)
+            os.chown(dir, borg_pwd.pw_uid, borg_pwd.pw_gid)
+            try:
+                subprocess.run(['mount', '-t', 'zfs', '-o', 'ro', snapshot, dir], check=True)
+                env = os.environ.copy()
+                create_args = ['borg',
+                    'create',
+                    '--lock-wait=600',
+                    '--one-file-system',
+                    '--compression=auto,zstd,10',
+                    '--chunker-params=10,23,16,4095',
+                    '--files-cache=ctime,size',
+                    '--show-rc',
+                    # '--remote-ratelimit=20480',
+                    '--progress',
+                    '--list',
+                    '--filter=AMEi-x?',
+                    '--stats' if not dry_run else '--dry-run'
+                ]
+                _, _, ts = snapshot.rpartition('@')
+                creation_time = isoparse(ts).astimezone(timezone.utc)
+                create_args += [f'--timestamp={creation_time.strftime("%Y-%m-%dT%H:%M:%S")}']
+                env['BORG_FILES_CACHE_SUFFIX'] = basename
+                create_args += [_archive_name(snapshot, target, archive_prefix), '.']
+                print({'create_args': create_args, 'cwd': dir, 'env': env}, file=sys.stderr)
+                subprocess.run(create_args, stdin=subprocess.DEVNULL, env=env, preexec_fn=lambda: as_borg(caps={Cap.DAC_READ_SEARCH}, cwd=dir), check=True)
+                # subprocess.run(create_args, stdin=subprocess.DEVNULL, env=env, preexec_fn=lambda: None, cwd=dir, check=True)
+            finally:
+                subprocess.run(['umount', dir], check=True)
+            os._exit(0)
+        else:
+            while True:
+                waitpid, waitret = os.wait()
+                if waitret != 0:
+                    sys.exit(waitret)
+                if waitpid == child:
+                    break
+    return 0
+
+def sigterm(signum, frame):
+    raise SystemExit(128 + signum)
+
+def main():
+    signal.signal(signal.SIGTERM, sigterm)
+
+    global logger
+    logger = logging.getLogger(__name__)
+    console_handler = logging.StreamHandler()
+    console_handler.setFormatter( logging.Formatter('[%(levelname)s](%(name)s): %(message)s') )
+    if sys.stderr.isatty():
+        console_handler.setFormatter( logging.Formatter('%(asctime)s [%(levelname)s](%(name)s): %(message)s') )
+    logger.addHandler(console_handler)
+
+    # log uncaught exceptions
+    def log_exceptions(type, value, tb):
+        global logger
+
+        logger.error(value)
+        sys.__excepthook__(type, value, tb) # calls default excepthook
+
+    sys.excepthook = log_exceptions
+
+    parser = argparse.ArgumentParser(prog='borgsnap')
+    parser.add_argument('--verbose', '-v', action='count', default=0)
+    parser.add_argument('--target', metavar='REPO', default='yggdrasil.borgbase:repo')
+    parser.add_argument('--archive-prefix', metavar='REPO', default='yggdrasil.vidhar.')
+    subparsers = parser.add_subparsers()
+    subparsers.required = True
+    parser.set_defaults(cmd=None)
+    check_parser = subparsers.add_parser('check')
+    check_parser.add_argument('--cache-file', type=lambda p: Path(p).absolute(), default=None)
+    check_parser.add_argument('snapshot')
+    check_parser.set_defaults(cmd=check)
+    create_parser = subparsers.add_parser('create')
+    create_parser.add_argument('--dry-run', '-n', action='store_true', default=False)
+    create_parser.add_argument('snapshot')
+    create_parser.set_defaults(cmd=create)
+    args = parser.parse_args()
+
+    if args.verbose <= 0:
+        logger.setLevel(logging.WARNING)
+    elif args.verbose <= 1:
+        logger.setLevel(logging.INFO)
+    else:
+        logger.setLevel(logging.DEBUG)
+
+    cmdArgs = {}
+    for copy in {'target', 'archive_prefix', 'snapshot', 'cache_file', 'dry_run'}:
+        if copy in vars(args):
+            cmdArgs[copy] = vars(args)[copy]
+
+    return args.cmd(**cmdArgs)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
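
As a quick orientation for the naming scheme implemented by _archive_basename and _archive_name above (the check subcommand has to reproduce it exactly when matching against borg list output), here is a minimal sketch; the snapshot name is hypothetical, while the prefix and target are the parser defaults:

    from datetime import timezone
    from dateutil.parser import isoparse

    snapshot = 'hdd-0/safe/home@2022-03-20T03:00:00+01:00'  # hypothetical ZFS snapshot
    archive_prefix = 'yggdrasil.vidhar.'
    target = 'yggdrasil.borgbase:repo'

    base_name, _, ts = snapshot.rpartition('@')
    # '-' is doubled before '/' is mapped to '-', so dataset paths stay unambiguous
    basename = archive_prefix + base_name.replace('-', '--').replace('/', '-')
    creation_time = isoparse(ts).astimezone(timezone.utc)
    print(f'{target}::{basename}-{creation_time.strftime("%Y-%m-%dT%H:%M:%S")}')
    # yggdrasil.borgbase:repo::yggdrasil.vidhar.hdd--0-safe-home-2022-03-20T02:00:00

The NixOS module further down wires exactly these defaults into zfssnap via the check and create subcommands.
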
diff --git a/hosts/vidhar/borg/borgsnap/setup.py b/hosts/vidhar/borg/borgsnap/setup.py
new file mode 100644
index 00000000..76356bfc
--- /dev/null
+++ b/hosts/vidhar/borg/borgsnap/setup.py
@@ -0,0 +1,10 @@
+from setuptools import setup
+
+setup(name='borgsnap',
+      packages=['borgsnap'],
+      entry_points={
+          'console_scripts': [
+              'borgsnap=borgsnap.__main__:main',
+          ],
+      }
+)
diff --git a/hosts/vidhar/borg/copy.py b/hosts/vidhar/borg/copy.py
index 4e9599b8..b9b667f2 100755
--- a/hosts/vidhar/borg/copy.py
+++ b/hosts/vidhar/borg/copy.py
@@ -71,7 +71,7 @@ def read_repo(path):
 
 class ToSync:
     to_sync = deque()
-
+
     def __iter__(self):
         return self
 
@@ -267,7 +267,7 @@ def sigterm(signum, frame):
 
 def main():
     signal.signal(signal.SIGTERM, sigterm)
-
+
     if "::" in args.source:
         (src_repo_path, _, src_archive) = args.source.partition("::")
         entry = None
diff --git a/hosts/vidhar/borg/default.nix b/hosts/vidhar/borg/default.nix
index 579630a9..650c91ee 100644
--- a/hosts/vidhar/borg/default.nix
+++ b/hosts/vidhar/borg/default.nix
@@ -1,23 +1,28 @@
-{ config, pkgs, lib, ... }:
+{ config, pkgs, lib, flakeInputs, ... }:
 
 with lib;
 
 let
+  sshConfig = pkgs.writeText "config" ''
+    Include /etc/ssh/ssh_config
+
+    ControlMaster auto
+    ControlPath /var/lib/borg/.borgssh-master-%r@%n:%p
+    ControlPersist yes
+
+    Host yggdrasil.borgbase
+      HostName nx69hpl8.repo.borgbase.com
+      User nx69hpl8
+      IdentityFile ${config.sops.secrets."append.borgbase".path}
+      IdentitiesOnly yes
+
+      BatchMode yes
+      ServerAliveInterval 10
+      ServerAliveCountMax 30
+  '';
+
   copyService = { repo, repoEscaped }: let
     serviceName = "copy-borg@${repoEscaped}";
-    sshConfig = pkgs.writeText "config" ''
-      Include /etc/ssh/ssh_config
-
-      Host yggdrasil.borgbase
-        HostName nx69hpl8.repo.borgbase.com
-        User nx69hpl8
-        IdentityFile ${config.sops.secrets."append.borgbase".path}
-        IdentitiesOnly yes
-
-        BatchMode yes
-        ServerAliveInterval 10
-        ServerAliveCountMax 30
-    '';
   in nameValuePair serviceName {
     serviceConfig = {
       Type = "oneshot";
@@ -72,8 +77,63 @@ let
       --prefix PATH : ${makeBinPath (with pkgs; [utillinux borgbackup])}:${config.security.wrapperDir}
     '';
   });
+
+  borgsnap = flakeInputs.mach-nix.lib.${config.nixpkgs.system}.buildPythonPackage rec {
+    pname = "borgsnap";
+    src = ./borgsnap;
+    version = "0.0.0";
+    ignoreDataOutdated = true;
+
+    requirements = ''
+      atomicwrites
+      pyprctl
+      python-unshare
+      xdg
+      python-dateutil
+    '';
+    postInstall = ''
+      wrapProgram $out/bin/borgsnap \
+        --prefix PATH : ${makeBinPath (with pkgs; [utillinux borgbackup])}:${config.security.wrapperDir}
+    '';
+
+    providers.python-unshare = "nixpkgs";
+    overridesPre = [
+      (self: super: { python-unshare = super.python-unshare.overrideAttrs (oldAttrs: { name = "python-unshare-0.2.1"; version = "0.2.1"; }); })
+    ];
+
+    _.xdg.buildInputs.add = with pkgs."python3Packages"; [ poetry ];
+    _.tomli.buildInputs.add = with pkgs."python3Packages"; [ flit-core ];
+  };
 in {
   config = {
+    services.zfssnap.config.exec = {
+      check = "${borgsnap}/bin/borgsnap -vvv --target yggdrasil.borgbase:repo --archive-prefix yggdrasil.vidhar. check --cache-file /run/zfssnap-prune/archives-cache.json";
+      cmd = "${borgsnap}/bin/borgsnap -vvv --target yggdrasil.borgbase:repo --archive-prefix yggdrasil.vidhar. create --dry-run";
+
+      halfweekly = "8";
+      monthly = "-1";
+    };
+
+    systemd.services = {
+      "zfssnap-prune" = {
+        serviceConfig = {
+          Environment = [
+            "BORG_RSH=\"${pkgs.openssh}/bin/ssh -F ${sshConfig}\""
+            "BORG_BASE_DIR=/var/lib/borg"
+            "BORG_CONFIG_DIR=/var/lib/borg/config"
+            "BORG_CACHE_DIR=/var/lib/borg/cache"
+            "BORG_SECURITY_DIR=/var/lib/borg/security"
+            "BORG_KEYS_DIR=/var/lib/borg/keys"
+            "BORG_KEY_FILE=${config.sops.secrets."yggdrasil.borgkey".path}"
+            "BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes"
+            "BORG_HOSTNAME_IS_UNIQUE=yes"
+          ];
+          RuntimeDirectory = "zfssnap-prune";
+        };
+      };
+    } // listToAttrs (map copyService [{ repo = "/srv/backup/borg/jotnar"; repoEscaped = "srv-backup-borg-jotnar"; }]);
+
+
     services.borgbackup.repos.jotnar = {
       path = "/srv/backup/borg/jotnar";
       authorizedKeysAppendOnly = let
@@ -111,11 +171,9 @@ in {
       mode = "0400";
     };
 
-    systemd.services = listToAttrs (map copyService [{ repo = "/srv/backup/borg/jotnar"; repoEscaped = "srv-backup-borg-jotnar"; }]);
-
     systemd.timers."copy-borg@srv-backup-borg-jotnar" = {
       wantedBy = ["multi-user.target"];
 
       timerConfig = {
        OnCalendar = "*-*-* 00/4:00:00 Europe/Berlin";
      };
diff --git a/hosts/vidhar/default.nix b/hosts/vidhar/default.nix
index 121cc9df..3f5d17d5 100644
--- a/hosts/vidhar/default.nix
+++ b/hosts/vidhar/default.nix
@@ -1,4 +1,7 @@
 { hostName, flake, config, pkgs, lib, ... }:
+
+with lib;
+
 {
   imports = with flake.nixosModules.systemProfiles; [
     ./zfs.nix ./network ./samba.nix ./dns ./prometheus ./borg
@@ -39,7 +42,7 @@
       luks.devices = {
         nvm0 = { device = "/dev/disk/by-label/${hostName}-nvm0"; bypassWorkqueues = true; };
         nvm1 = { device = "/dev/disk/by-label/${hostName}-nvm1"; bypassWorkqueues = true; };
-
+
         hdd0.device = "/dev/disk/by-label/${hostName}-hdd0";
         hdd1.device = "/dev/disk/by-label/${hostName}-hdd1";
         hdd2.device = "/dev/disk/by-label/${hostName}-hdd2";
@@ -58,7 +61,7 @@
       options = [ "mode=0755" ];
     };
   };
-
+
   services.timesyncd.enable = false;
   services.chrony = {
     enable = true;
@@ -132,6 +135,7 @@
       access_log syslog:server=unix:/dev/log main;
       error_log syslog:server=unix:/dev/log info;
 
+      client_body_buffer_size 16m;
       client_body_temp_path /run/nginx-client-bodies;
     '';
     upstreams.grafana = {
@@ -173,12 +177,12 @@
       sopsFile = ./selfsigned.key;
     };
   systemd.services.nginx = {
-    preStart = lib.mkForce config.services.nginx.preStart;
+    preStart = mkForce config.services.nginx.preStart;
     serviceConfig = {
-      ExecReload = lib.mkForce "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      ExecReload = mkForce "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
       LoadCredential = [ "selfsigned.key:${config.sops.secrets."selfsigned.key".path}" ];
 
-      RuntimeDirectory = lib.mkForce [ "nginx" "nginx-client-bodies" ];
+      RuntimeDirectory = mkForce [ "nginx" "nginx-client-bodies" ];
       RuntimeDirectoryMode = "0750";
     };
   };
@@ -232,7 +236,7 @@
     };
   };
   systemd.services.loki.preStart = let
-    rulesYaml = lib.generators.toYAML {} {
+    rulesYaml = generators.toYAML {} {
      groups = [
        { name = "power-failures";
          rules = [
@@ -311,6 +315,29 @@
     timers.wants = ["systemd-tmpfiles-clean.timer"];
   };
 
+  services.smartd = {
+    enable = true;
+    autodetect = false;
+    defaults.monitored = "-a -o on -s (S/../.././02|L/../../7/04)";
+    devices = map (dev: { device = "/dev/disk/by-path/${dev}"; }) [
+      "pci-0000:00:1f.2-ata-1"
+      "pci-0000:00:1f.2-ata-3"
+      "pci-0000:00:1f.2-ata-4"
+      "pci-0000:00:1f.2-ata-5"
+      "pci-0000:00:1f.2-ata-6"
+      "pci-0000:02:00.0-nvme-1"
+      "pci-0000:05:00.0-sas-phy0-lun-0"
+      "pci-0000:05:00.0-sas-phy1-lun-0"
+      "pci-0000:06:00.0-nvme-1"
+    ];
+    notifications = {
+      test = false;
+      mail.enable = false;
+      x11.enable = false;
+      wall.enable = false;
+    };
+  };
+
   environment.systemPackages = with pkgs; [iotop vmtouch];
 
   system.stateVersion = "21.05";
diff --git a/hosts/vidhar/prometheus/default.nix b/hosts/vidhar/prometheus/default.nix
index 4c23d8a9..7ac86c30 100644
--- a/hosts/vidhar/prometheus/default.nix
+++ b/hosts/vidhar/prometheus/default.nix
@@ -34,20 +34,6 @@ in {
       enable = true;
       enabledCollectors = [];
     };
-    smartctl = {
-      enable = true;
-      devices = map (dev: "/dev/disk/by-path/${dev}") [
-        "pci-0000:00:1f.2-ata-1"
-        "pci-0000:00:1f.2-ata-3"
-        "pci-0000:00:1f.2-ata-4"
-        "pci-0000:00:1f.2-ata-5"
-        "pci-0000:00:1f.2-ata-6"
-        "pci-0000:02:00.0-nvme-1"
-        "pci-0000:05:00.0-sas-phy0-lun-0"
-        "pci-0000:05:00.0-sas-phy1-lun-0"
-        "pci-0000:06:00.0-nvme-1"
-      ];
-    };
     snmp = {
       enable = true;
       configurationPath = ./snmp.yml;
@@ -124,10 +110,10 @@ in {
         }
         { job_name = "smartctl";
           static_configs = [
-            { targets = ["localhost:${toString config.services.prometheus.exporters.smartctl.port}"]; }
+            { targets = ["localhost:9633"]; }
           ];
           relabel_configs = relabelHosts;
-          scrape_interval = "1s";
+          scrape_interval = "60s";
         }
         { job_name = "snmp";
           static_configs = [
@@ -376,6 +362,30 @@ in {
     };
   };
 
+  systemd.services."prometheus-smartctl-exporter" = {
+    wantedBy = [ "multi-user.target" ];
+    after = [ "network.target" ];
+    path = with pkgs; [ smartmontools ];
+    serviceConfig = {
+      Restart = "always";
+
+      CapabilityBoundingSet = ["CAP_DAC_OVERRIDE" "CAP_SYS_RAWIO" "CAP_SYS_ADMIN"];
+      AmbientCapabilities = ["CAP_DAC_OVERRIDE" "CAP_SYS_RAWIO" "CAP_SYS_ADMIN"];
+      ProtectSystem = "strict";
+      DynamicUser = true;
+      LockPersonality = true;
+      MemoryDenyWriteExecute = true;
+      NoNewPrivileges = true;
+      PrivateDevices = false;
+      PrivateTmp = true;
+      ProcSubset = "pid";
+
+      Type = "simple";
+      ExecStart = "${pkgs.smartprom}/bin/smartprom";
+      Environment = "SMARTCTL_EXPORTER_PORT=9633";
+    };
+  };
+
   systemd.services."prometheus-systemd-exporter" = let
     cfg = config.services.prometheus.exporters.systemd;
   in {
@@ -385,14 +395,6 @@ in {
     '';
   };
 
-  systemd.services."prometheus-smartctl-exporter" = {
-    serviceConfig = {
-      DeviceAllow = lib.mkForce config.services.prometheus.exporters.smartctl.devices;
-      CapabilityBoundingSet = lib.mkForce ["CAP_SYS_ADMIN"];
-      AmbientCapabilities = lib.mkForce ["CAP_SYS_ADMIN"];
-    };
-  };
-
   services.nginx = {
     upstreams.prometheus = {
       servers = { "localhost:${toString config.services.prometheus.port}" = {}; };
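
The unit added above replaces the nixpkgs smartctl exporter with smartprom listening on port 9633 (via SMARTCTL_EXPORTER_PORT), and the smartctl scrape job now points at that port with a 60s interval. A minimal sketch for poking the endpoint from the host, assuming smartprom exposes the usual Prometheus text format under /metrics (the path is an assumption, not taken from the diff):

    from urllib.request import urlopen

    # Port 9633 matches SMARTCTL_EXPORTER_PORT above; the /metrics path is assumed.
    with urlopen('http://localhost:9633/metrics', timeout=10) as resp:
        body = resp.read().decode('utf-8', errors='replace')

    # Metric names depend on the exporter; this filter is only illustrative.
    print('\n'.join(line for line in body.splitlines() if line.startswith('smart')))
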
diff --git a/hosts/vidhar/zfs.nix b/hosts/vidhar/zfs.nix
index ef285536..52b48aca 100644
--- a/hosts/vidhar/zfs.nix
+++ b/hosts/vidhar/zfs.nix
@@ -130,7 +130,21 @@
     echo "=== ZPOOL IMPORT COMPLETE ==="
   '';
 
-  services.zfssnap.enable = true;
+  services.zfssnap = {
+    enable = true;
+    config.keep = {
+      within = "15m";
+      "5m" = "48";
+      "15m" = "32";
+      hourly = "48";
+      "4h" = "24";
+      "12h" = "12";
+      daily = "62";
+      halfweekly = "32";
+      weekly = "24";
+      monthly = "-1";
+    };
+  };
   services.zfs.trim.enable = false;
   services.zfs.autoScrub = {
     enable = true;
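
For orientation, the keep buckets above translate roughly to the coverage sketched below, assuming zfssnap retains the given number of most recent snapshots per interval and -1 means keep indefinitely (that reading of the config is an assumption, not taken from the diff):

    # Assumed interpretation of services.zfssnap.config.keep; counts copied from the diff.
    keep = {
        'within': '15m',   # keep everything from the last 15 minutes
        '5m': 48,          # 48 x 5 min  ~ 4 hours
        '15m': 32,         # 32 x 15 min ~ 8 hours
        'hourly': 48,      # 2 days
        '4h': 24,          # 4 days
        '12h': 12,         # 6 days
        'daily': 62,       # ~2 months
        'halfweekly': 32,  # ~16 weeks
        'weekly': 24,      # ~5.5 months
        'monthly': -1,     # unlimited
    }
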