diff options
author | Gregor Kleen <gkleen@yggdrasil.li> | 2023-03-13 15:42:35 +0100 |
---|---|---|
committer | Gregor Kleen <gkleen@yggdrasil.li> | 2023-03-13 15:42:35 +0100 |
commit | 6e820741126c9c8b156cf8f9fe285e96cafb3138 (patch) | |
tree | de2b4aeaff584420f2410ce47e0f906f68e79387 /modules | |
parent | b9e6f77db0871da3c72928619395590b28ea0181 (diff) | |
download | nixos-6e820741126c9c8b156cf8f9fe285e96cafb3138.tar nixos-6e820741126c9c8b156cf8f9fe285e96cafb3138.tar.gz nixos-6e820741126c9c8b156cf8f9fe285e96cafb3138.tar.bz2 nixos-6e820741126c9c8b156cf8f9fe285e96cafb3138.tar.xz nixos-6e820741126c9c8b156cf8f9fe285e96cafb3138.zip |
spin off backup-utils
Diffstat (limited to 'modules')
-rw-r--r-- | modules/backup-utils.nix | 13 | ||||
-rw-r--r-- | modules/borgsnap/borgsnap/borgsnap/__main__.py | 394 | ||||
-rw-r--r-- | modules/borgsnap/borgsnap/setup.py | 10 | ||||
-rw-r--r-- | modules/borgsnap/default.nix | 116 | ||||
-rw-r--r-- | modules/zfssnap/default.nix | 113 | ||||
-rw-r--r-- | modules/zfssnap/zfssnap/setup.py | 10 | ||||
-rw-r--r-- | modules/zfssnap/zfssnap/zfssnap/__main__.py | 438 |
7 files changed, 13 insertions, 1081 deletions
diff --git a/modules/backup-utils.nix b/modules/backup-utils.nix new file mode 100644 index 00000000..82a42ecd --- /dev/null +++ b/modules/backup-utils.nix | |||
@@ -0,0 +1,13 @@ | |||
1 | { flakeInputs, hostName, lib, ... }: | ||
2 | |||
3 | with lib; | ||
4 | |||
5 | { | ||
6 | imports = [ | ||
7 | flakeInputs.backup-utils.nixosModules.default | ||
8 | ]; | ||
9 | |||
10 | config = { | ||
11 | services.borgsnap.archive-prefix = mkDefault "yggdrasil.${hostName}."; | ||
12 | }; | ||
13 | } | ||
diff --git a/modules/borgsnap/borgsnap/borgsnap/__main__.py b/modules/borgsnap/borgsnap/borgsnap/__main__.py deleted file mode 100644 index cd8f1d76..00000000 --- a/modules/borgsnap/borgsnap/borgsnap/__main__.py +++ /dev/null | |||
@@ -1,394 +0,0 @@ | |||
1 | import argparse | ||
2 | import os, sys, signal, io | ||
3 | from pyprctl import CapState, Cap, cap_ambient_raise, cap_ambient_is_set, set_keepcaps | ||
4 | from pwd import getpwnam | ||
5 | |||
6 | from datetime import datetime, timezone | ||
7 | from dateutil.parser import isoparse | ||
8 | |||
9 | import unshare | ||
10 | from tempfile import TemporaryDirectory | ||
11 | |||
12 | import logging | ||
13 | |||
14 | import json | ||
15 | import subprocess | ||
16 | import csv | ||
17 | from collections import namedtuple | ||
18 | from distutils.util import strtobool | ||
19 | |||
20 | import pathlib | ||
21 | from pathlib import Path | ||
22 | |||
23 | from atomicwrites import atomic_write | ||
24 | |||
25 | from traceback import format_exc | ||
26 | |||
27 | from multiprocessing import Process, Manager | ||
28 | from contextlib import closing | ||
29 | |||
30 | from enum import Enum, auto | ||
31 | |||
32 | import select | ||
33 | import time | ||
34 | import math | ||
35 | |||
36 | |||
37 | PROP_DO_BORGSNAP = 'li.yggdrasil:borgsnap' | ||
38 | |||
39 | |||
40 | class DoValue(Enum): | ||
41 | BORGSNAP_DO = auto() | ||
42 | BORGSNAP_KEEP = auto() | ||
43 | BORGSNAP_DONT = auto() | ||
44 | |||
45 | @classmethod | ||
46 | def from_prop(cls, v: str): | ||
47 | if v.lower() == 'keep': | ||
48 | return cls.BORGSNAP_KEEP | ||
49 | |||
50 | return cls.BORGSNAP_DO if not v or bool(strtobool(v)) else cls.BORGSNAP_DONT | ||
51 | |||
52 | @classmethod | ||
53 | def merge(cls, v1, v2): | ||
54 | match (v1, v2): | ||
55 | case (cls.BORGSNAP_DONT, _): | ||
56 | return cls.BORGSNAP_DONT | ||
57 | case (_, cls.BORGSNAP_DONT): | ||
58 | return cls.BORGSNAP_DONT | ||
59 | case (cls.BORGSNAP_KEEP, _): | ||
60 | return cls.BORGSNAP_KEEP | ||
61 | case (_, cls.BORGSNAP_KEEP): | ||
62 | return cls.BORGSNAP_KEEP | ||
63 | case other: | ||
64 | return cls.BORGSNAP_DO | ||
65 | |||
66 | def returncode(self): | ||
67 | match self: | ||
68 | case self.__class__.BORGSNAP_DO: | ||
69 | return 126 | ||
70 | case self.__class__.BORGSNAP_KEEP: | ||
71 | return 125 | ||
72 | case self.__class__.BORGSNAP_DONT: | ||
73 | return 124 | ||
74 | |||
75 | borg_pwd = getpwnam('borg') | ||
76 | |||
77 | def as_borg(caps=set()): | ||
78 | global logger | ||
79 | |||
80 | try: | ||
81 | if caps: | ||
82 | c_state = CapState.get_current() | ||
83 | c_state.permitted.add(*caps) | ||
84 | c_state.set_current() | ||
85 | |||
86 | logger.debug("before setgid/setuid: cap_permitted=%s", CapState.get_current().permitted) | ||
87 | |||
88 | set_keepcaps(True) | ||
89 | |||
90 | os.setgid(borg_pwd.pw_gid) | ||
91 | os.setuid(borg_pwd.pw_uid) | ||
92 | |||
93 | if caps: | ||
94 | logger.debug("after setgid/setuid: cap_permitted=%s", CapState.get_current().permitted) | ||
95 | |||
96 | c_state = CapState.get_current() | ||
97 | c_state.permitted = caps.copy() | ||
98 | c_state.inheritable.add(*caps) | ||
99 | c_state.set_current() | ||
100 | |||
101 | logger.debug("cap_permitted=%s", CapState.get_current().permitted) | ||
102 | logger.debug("cap_inheritable=%s", CapState.get_current().inheritable) | ||
103 | |||
104 | for cap in caps: | ||
105 | cap_ambient_raise(cap) | ||
106 | logger.debug("cap_ambient[%s]=%s", cap, cap_ambient_is_set(cap)) | ||
107 | except Exception: | ||
108 | logger.error(format_exc()) | ||
109 | raise | ||
110 | |||
111 | |||
112 | def _archive_name(snapshot, target, archive_prefix): | ||
113 | _, _, ts = snapshot.rpartition('@') | ||
114 | creation_time = isoparse(ts).astimezone(timezone.utc) | ||
115 | archive_name = _archive_basename(snapshot, archive_prefix) | ||
116 | return f'{target}::{archive_name}-{creation_time.strftime("%Y-%m-%dT%H:%M:%S")}' | ||
117 | |||
118 | def _archive_basename(snapshot, archive_prefix): | ||
119 | base_name, _, _ = snapshot.rpartition('@') | ||
120 | return archive_prefix + base_name.replace('-', '--').replace('/', '-') | ||
121 | |||
122 | def check(*, snapshot, target, archive_prefix, cache_file): | ||
123 | global logger | ||
124 | |||
125 | archives = None | ||
126 | if cache_file: | ||
127 | logger.debug('Trying cache...') | ||
128 | try: | ||
129 | with open(cache_file, mode='r', encoding='utf-8') as fp: | ||
130 | archives = set(json.load(fp)) | ||
131 | logger.debug('Loaded archive list from cache') | ||
132 | except FileNotFoundError: | ||
133 | pass | ||
134 | |||
135 | if not archives: | ||
136 | logger.info('Loading archive list from remote...') | ||
137 | with subprocess.Popen(['borg', 'list', '--info', '--lock-wait=600', '--json', target], stdout=subprocess.PIPE, preexec_fn=lambda: as_borg()) as proc: | ||
138 | archives = set([archive['barchive'] for archive in json.load(proc.stdout)['archives']]) | ||
139 | if cache_file: | ||
140 | logger.debug('Saving archive list to cache...') | ||
141 | with atomic_write(cache_file, mode='w', encoding='utf-8', overwrite=True) as fp: | ||
142 | json.dump(list(archives), fp) | ||
143 | |||
144 | # logger.debug(f'archives: {archives}') | ||
145 | _, _, archive_name = _archive_name(snapshot, target, archive_prefix).partition('::') | ||
146 | if archive_name in archives: | ||
147 | logger.info('‘%s’ found', archive_name) | ||
148 | return 0 | ||
149 | else: | ||
150 | logger.info('‘%s’ not found', archive_name) | ||
151 | |||
152 | logger.debug('Checking %s for ‘%s’...', PROP_DO_BORGSNAP, snapshot) | ||
153 | intent = DoValue.BORGSNAP_DO | ||
154 | p = subprocess.run(['zfs', 'get', '-H', '-p', '-o', 'name,value', PROP_DO_BORGSNAP, snapshot], stdout=subprocess.PIPE, text=True, check=True) | ||
155 | reader = csv.DictReader(io.StringIO(p.stdout), fieldnames=['name', 'value'], delimiter='\t', quoting=csv.QUOTE_NONE) | ||
156 | Row = namedtuple('Row', reader.fieldnames) | ||
157 | for row in [Row(**data) for data in reader]: | ||
158 | if not row.value or row.value == '-': | ||
159 | continue | ||
160 | |||
161 | logger.debug('%s=%s (parsed as %s) for ‘%s’...', PROP_DO_BORGSNAP, row.value, DoValue.from_prop(row.value), row.name) | ||
162 | intent = DoValue.merge(intent, DoValue.from_prop(row.value)) | ||
163 | |||
164 | match intent: | ||
165 | case DoValue.BORGSNAP_DONT: | ||
166 | logger.warn('%s specifies to ignore, returning accordingly...', PROP_DO_BORGSNAP) | ||
167 | case DoValue.BORGSNAP_KEEP: | ||
168 | logger.info('%s specifies to ignore but keep, returning accordingly...', PROP_DO_BORGSNAP) | ||
169 | case other: | ||
170 | pass | ||
171 | |||
172 | return intent.returncode() | ||
173 | |||
174 | def create(*, snapshot, target, archive_prefix, dry_run): | ||
175 | global logger | ||
176 | |||
177 | basename = _archive_basename(snapshot, archive_prefix) | ||
178 | |||
179 | def do_create(tmpdir_q): | ||
180 | global logger | ||
181 | nonlocal basename, snapshot, target, archive_prefix, dry_run | ||
182 | |||
183 | tmpdir = tmpdir_q.get() | ||
184 | |||
185 | unshare.unshare(unshare.CLONE_NEWNS) | ||
186 | subprocess.run(['mount', '--make-rprivate', '/'], check=True) | ||
187 | chroot = pathlib.Path(tmpdir) / 'chroot' | ||
188 | upper = pathlib.Path(tmpdir) / 'upper' | ||
189 | work = pathlib.Path(tmpdir) / 'work' | ||
190 | for path in [chroot,upper,work]: | ||
191 | path.mkdir() | ||
192 | subprocess.run(['mount', '-t', 'overlay', 'overlay', '-o', f'lowerdir=/,upperdir={upper},workdir={work}', chroot], check=True) | ||
193 | bindMounts = ['nix', 'run', 'run/secrets.d', 'run/wrappers', 'proc', 'dev', 'sys', pathlib.Path(os.path.expanduser('~')).relative_to('/')] | ||
194 | if borg_base_dir := os.getenv('BORG_BASE_DIR'): | ||
195 | bindMounts.append(pathlib.Path(borg_base_dir).relative_to('/')) | ||
196 | if ssh_auth_sock := os.getenv('SSH_AUTH_SOCK'): | ||
197 | bindMounts.append(pathlib.Path(ssh_auth_sock).parent.relative_to('/')) | ||
198 | for bindMount in bindMounts: | ||
199 | (chroot / bindMount).mkdir(parents=True,exist_ok=True) | ||
200 | subprocess.run(['mount', '--bind', pathlib.Path('/') / bindMount, chroot / bindMount], check=True) | ||
201 | |||
202 | os.chroot(chroot) | ||
203 | os.chdir('/') | ||
204 | dir = pathlib.Path('/borg') | ||
205 | dir.mkdir(parents=True,exist_ok=True,mode=0o0750) | ||
206 | os.chown(dir, borg_pwd.pw_uid, borg_pwd.pw_gid) | ||
207 | |||
208 | base_name, _, _ = snapshot.rpartition('@') | ||
209 | type_val = subprocess.run(['zfs', 'get', '-H', '-p', '-o', 'value', 'type', base_name], stdout=subprocess.PIPE, text=True, check=True).stdout.strip() | ||
210 | match type_val: | ||
211 | case 'filesystem': | ||
212 | subprocess.run(['mount', '-t', 'zfs', '-o', 'ro', snapshot, dir], check=True) | ||
213 | case 'volume': | ||
214 | snapdev_val = subprocess.run(['zfs', 'get', '-H', '-p', '-o', 'value', 'snapdev', base_name], stdout=subprocess.PIPE, text=True, check=True).stdout.strip() | ||
215 | try: | ||
216 | if snapdev_val == 'hidden': | ||
217 | subprocess.run(['zfs', 'set', 'snapdev=visible', base_name], check=True) | ||
218 | subprocess.run(['mount', '-t', 'auto', '-o', 'ro', Path('/dev/zvol') / snapshot, dir], check=True) | ||
219 | finally: | ||
220 | if snapdev_val == 'hidden': | ||
221 | subprocess.run(['zfs', 'inherit', 'snapdev', base_name], check=True) | ||
222 | case other: | ||
223 | raise ValueError(f'‘{base_name}’ is of type ‘{type_val}’') | ||
224 | |||
225 | env = os.environ.copy() | ||
226 | create_args = ['borg', | ||
227 | 'create', | ||
228 | '--lock-wait=600', | ||
229 | '--one-file-system', | ||
230 | '--exclude-caches', | ||
231 | '--keep-exclude-tags', | ||
232 | '--compression=auto,zstd,10', | ||
233 | '--chunker-params=10,23,16,4095', | ||
234 | '--files-cache=ctime,size', | ||
235 | '--show-rc', | ||
236 | '--upload-buffer=100', | ||
237 | '--progress', | ||
238 | '--list', | ||
239 | '--filter=AMEi-x?', | ||
240 | '--stats' if not dry_run else '--dry-run', | ||
241 | ] | ||
242 | _, _, ts = snapshot.rpartition('@') | ||
243 | creation_time = isoparse(ts).astimezone(timezone.utc) | ||
244 | create_args += [f'--timestamp={creation_time.strftime("%Y-%m-%dT%H:%M:%S")}'] | ||
245 | env['BORG_FILES_CACHE_SUFFIX'] = basename | ||
246 | archive_name = _archive_name(snapshot, target, archive_prefix) | ||
247 | target_host, _, target_path = target.rpartition(':') | ||
248 | parents_init = list() | ||
249 | if Path(target_path).parents: | ||
250 | *parents_init, _ = list(Path(target_path).parents) | ||
251 | backup_patterns = [*(map(lambda p: Path('.backup') / f'{target_host}:{p}', [Path(target_path), *parents_init])), Path('.backup') / target_host, Path('.backup')] | ||
252 | for pattern_file in backup_patterns: | ||
253 | if (dir / pattern_file).is_file(): | ||
254 | logger.debug('Found backup patterns at ‘%s’', dir / pattern_file) | ||
255 | create_args += [f'--patterns-from={pattern_file}', archive_name] | ||
256 | break | ||
257 | elif (dir / pattern_file).exists(): | ||
258 | logger.warn('‘%s’ exists but is no file', dir / pattern_file) | ||
259 | else: | ||
260 | logger.debug('No backup patterns exist, checked %s', list(map(lambda pattern_file: str(dir / pattern_file), backup_patterns))) | ||
261 | create_args += [archive_name, '.'] | ||
262 | logger.debug('%s', {'create_args': create_args, 'cwd': dir, 'env': env}) | ||
263 | |||
264 | with subprocess.Popen(create_args, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, preexec_fn=lambda: as_borg(caps={Cap.DAC_READ_SEARCH}), cwd=dir, text=True) as proc: | ||
265 | proc_logger = logger.getChild('borg') | ||
266 | stdout_logger = proc_logger.getChild('stdout') | ||
267 | stderr_logger = proc_logger.getChild('stderr') | ||
268 | |||
269 | poll = select.poll() | ||
270 | poll.register(proc.stdout, select.POLLIN | select.POLLHUP) | ||
271 | poll.register(proc.stderr, select.POLLIN | select.POLLHUP) | ||
272 | pollc = 2 | ||
273 | events = poll.poll() | ||
274 | while pollc > 0 and len(events) > 0: | ||
275 | for rfd, event in events: | ||
276 | if event & select.POLLIN: | ||
277 | if rfd == proc.stdout.fileno(): | ||
278 | if line := proc.stdout.readline(): | ||
279 | stdout_logger.info(line[:-1]) | ||
280 | if rfd == proc.stderr.fileno(): | ||
281 | if line := proc.stderr.readline(): | ||
282 | stderr_logger.info(line[:-1]) | ||
283 | if event & select.POLLHUP: | ||
284 | poll.unregister(rfd) | ||
285 | pollc -= 1 | ||
286 | |||
287 | if pollc > 0: | ||
288 | events = poll.poll() | ||
289 | |||
290 | for handler in proc_logger.handlers: | ||
291 | handler.flush() | ||
292 | |||
293 | ret = proc.wait() | ||
294 | if ret != 0: | ||
295 | raise Exception(f'borg subprocess exited with returncode {ret}') | ||
296 | |||
297 | with Manager() as manager: | ||
298 | tmpdir_q = manager.Queue(1) | ||
299 | with closing(Process(target=do_create, args=(tmpdir_q,), name='do_create')) as p: | ||
300 | p.start() | ||
301 | |||
302 | with TemporaryDirectory(prefix=f'borg-mount_{basename}_', dir=os.getenv('RUNTIME_DIRECTORY')) as tmpdir: | ||
303 | tmpdir_q.put(tmpdir) | ||
304 | p.join() | ||
305 | if p.exitcode == 0 and dry_run: | ||
306 | return 125 | ||
307 | return p.exitcode | ||
308 | |||
309 | def sigterm(signum, frame): | ||
310 | raise SystemExit(128 + signum) | ||
311 | |||
312 | def main(): | ||
313 | signal.signal(signal.SIGTERM, sigterm) | ||
314 | |||
315 | global logger | ||
316 | logger = logging.getLogger(__name__) | ||
317 | console_handler = logging.StreamHandler() | ||
318 | console_handler.setFormatter( logging.Formatter('[%(levelname)s](%(name)s): %(message)s') ) | ||
319 | if sys.stderr.isatty(): | ||
320 | console_handler.setFormatter( logging.Formatter('%(asctime)s [%(levelname)s](%(name)s): %(message)s') ) | ||
321 | |||
322 | burst_max = 10000 | ||
323 | burst = burst_max | ||
324 | last_use = None | ||
325 | inv_rate = 1e6 | ||
326 | def consume_filter(record): | ||
327 | nonlocal burst, burst_max, inv_rate, last_use | ||
328 | |||
329 | delay = None | ||
330 | while True: | ||
331 | now = time.monotonic_ns() | ||
332 | burst = min(burst_max, burst + math.floor((now - last_use) / inv_rate)) if last_use else burst_max | ||
333 | last_use = now | ||
334 | |||
335 | if burst > 0: | ||
336 | burst -= 1 | ||
337 | if delay: | ||
338 | delay = now - delay | ||
339 | |||
340 | return True | ||
341 | |||
342 | if delay is None: | ||
343 | delay = now | ||
344 | time.sleep(inv_rate / 1e9) | ||
345 | console_handler.addFilter(consume_filter) | ||
346 | |||
347 | logger.addHandler(console_handler) | ||
348 | |||
349 | # log uncaught exceptions | ||
350 | def log_exceptions(type, value, tb): | ||
351 | global logger | ||
352 | |||
353 | logger.error(value) | ||
354 | sys.__excepthook__(type, value, tb) # calls default excepthook | ||
355 | |||
356 | sys.excepthook = log_exceptions | ||
357 | |||
358 | parser = argparse.ArgumentParser(prog='borgsnap') | ||
359 | parser.add_argument('--verbosity', dest='log_level', action='append', type=int) | ||
360 | parser.add_argument('--verbose', '-v', dest='log_level', action='append_const', const=1) | ||
361 | parser.add_argument('--quiet', '-q', dest='log_level', action='append_const', const=-1) | ||
362 | parser.add_argument('--target', metavar='REPO', default='yggdrasil.borgbase:repo') | ||
363 | parser.add_argument('--archive-prefix', metavar='REPO', default='yggdrasil.vidhar.') | ||
364 | subparsers = parser.add_subparsers() | ||
365 | subparsers.required = True | ||
366 | parser.set_defaults(cmd=None) | ||
367 | check_parser = subparsers.add_parser('check') | ||
368 | check_parser.add_argument('--cache-file', type=lambda p: Path(p).absolute(), default=None) | ||
369 | check_parser.add_argument('snapshot') | ||
370 | check_parser.set_defaults(cmd=check) | ||
371 | create_parser = subparsers.add_parser('create') | ||
372 | create_parser.add_argument('--dry-run', '-n', action='store_true', default=False) | ||
373 | create_parser.add_argument('snapshot') | ||
374 | create_parser.set_defaults(cmd=create) | ||
375 | args = parser.parse_args() | ||
376 | |||
377 | LOG_LEVELS = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL] | ||
378 | DEFAULT_LOG_LEVEL = logging.ERROR | ||
379 | log_level = LOG_LEVELS.index(DEFAULT_LOG_LEVEL) | ||
380 | |||
381 | for adjustment in args.log_level or (): | ||
382 | log_level = min(len(LOG_LEVELS) - 1, max(log_level - adjustment, 0)) | ||
383 | logger.setLevel(LOG_LEVELS[log_level]) | ||
384 | |||
385 | cmdArgs = {} | ||
386 | for copy in {'target', 'archive_prefix', 'snapshot', 'cache_file', 'dry_run'}: | ||
387 | if copy in vars(args): | ||
388 | cmdArgs[copy] = vars(args)[copy] | ||
389 | |||
390 | return args.cmd(**cmdArgs) | ||
391 | |||
392 | |||
393 | if __name__ == '__main__': | ||
394 | sys.exit(main()) | ||
diff --git a/modules/borgsnap/borgsnap/setup.py b/modules/borgsnap/borgsnap/setup.py deleted file mode 100644 index 76356bfc..00000000 --- a/modules/borgsnap/borgsnap/setup.py +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | from setuptools import setup | ||
2 | |||
3 | setup(name='borgsnap', | ||
4 | packages=['borgsnap'], | ||
5 | entry_points={ | ||
6 | 'console_scripts': [ | ||
7 | 'borgsnap=borgsnap.__main__:main', | ||
8 | ], | ||
9 | } | ||
10 | ) | ||
diff --git a/modules/borgsnap/default.nix b/modules/borgsnap/default.nix deleted file mode 100644 index 0a674e64..00000000 --- a/modules/borgsnap/default.nix +++ /dev/null | |||
@@ -1,116 +0,0 @@ | |||
1 | { config, pkgs, lib, flakeInputs, hostName, ... }: | ||
2 | |||
3 | with lib; | ||
4 | |||
5 | let | ||
6 | borgsnap = flakeInputs.mach-nix.lib.${config.nixpkgs.system}.buildPythonPackage rec { | ||
7 | pname = "borgsnap"; | ||
8 | src = ./borgsnap; | ||
9 | version = "0.0.0"; | ||
10 | ignoreDataOutdated = true; | ||
11 | |||
12 | requirements = '' | ||
13 | atomicwrites | ||
14 | pyprctl | ||
15 | python-unshare | ||
16 | python-dateutil | ||
17 | ''; | ||
18 | postInstall = '' | ||
19 | wrapProgram $out/bin/borgsnap \ | ||
20 | --prefix PATH : ${makeBinPath (with pkgs; [config.boot.zfs.package util-linux borgbackup])}:${config.security.wrapperDir} | ||
21 | ''; | ||
22 | |||
23 | providers.python-unshare = "nixpkgs"; | ||
24 | overridesPre = [ | ||
25 | (self: super: { python-unshare = super.python-unshare.overrideAttrs (oldAttrs: { name = "python-unshare-0.2.1"; version = "0.2.1"; }); }) | ||
26 | ]; | ||
27 | |||
28 | _.tomli.buildInputs.add = with pkgs."python3Packages"; [ flit-core ]; | ||
29 | }; | ||
30 | |||
31 | cfg = config.services.borgsnap; | ||
32 | in { | ||
33 | options = { | ||
34 | services.borgsnap = { | ||
35 | enable = mkEnableOption "borgsnap service"; | ||
36 | |||
37 | target = mkOption { | ||
38 | type = types.str; | ||
39 | }; | ||
40 | |||
41 | archive-prefix = mkOption { | ||
42 | type = types.str; | ||
43 | default = "yggdrasil.${hostName}."; | ||
44 | }; | ||
45 | |||
46 | extraConfig = mkOption { | ||
47 | type = with types; attrsOf str; | ||
48 | default = { | ||
49 | halfweekly = "8"; | ||
50 | monthly = "-1"; | ||
51 | }; | ||
52 | }; | ||
53 | |||
54 | verbosity = mkOption { | ||
55 | type = types.int; | ||
56 | default = config.services.zfssnap.verbosity; | ||
57 | }; | ||
58 | |||
59 | sshConfig = mkOption { | ||
60 | type = with types; nullOr str; | ||
61 | default = null; | ||
62 | }; | ||
63 | |||
64 | keyfile = mkOption { | ||
65 | type = with types; nullOr str; | ||
66 | default = null; | ||
67 | }; | ||
68 | |||
69 | extraCreateArgs = mkOption { | ||
70 | type = with types; listOf str; | ||
71 | default = []; | ||
72 | }; | ||
73 | extraCheckArgs = mkOption { | ||
74 | type = with types; listOf str; | ||
75 | default = []; | ||
76 | }; | ||
77 | |||
78 | unknownUnencryptedRepoAccessOk = mkOption { | ||
79 | type = types.bool; | ||
80 | default = false; | ||
81 | }; | ||
82 | hostnameIsUnique = mkOption { | ||
83 | type = types.bool; | ||
84 | default = true; | ||
85 | }; | ||
86 | }; | ||
87 | }; | ||
88 | |||
89 | config = mkIf cfg.enable { | ||
90 | warnings = mkIf (!config.services.zfssnap.enable) [ | ||
91 | "borgsnap will do nothing if zfssnap is not enabled" | ||
92 | ]; | ||
93 | |||
94 | services.zfssnap.config.exec = { | ||
95 | check = "${borgsnap}/bin/borgsnap --verbosity=${toString cfg.verbosity} --target ${escapeShellArg cfg.target} --archive-prefix ${escapeShellArg cfg.archive-prefix} check --cache-file /run/zfssnap-prune/archives-cache.json ${escapeShellArgs cfg.extraCheckArgs}"; | ||
96 | cmd = "${borgsnap}/bin/borgsnap --verbosity=${toString cfg.verbosity} --target ${escapeShellArg cfg.target} --archive-prefix ${escapeShellArg cfg.archive-prefix} create ${escapeShellArgs cfg.extraCreateArgs}"; | ||
97 | } // cfg.extraConfig; | ||
98 | |||
99 | systemd.services."zfssnap-prune" = { | ||
100 | serviceConfig = { | ||
101 | Environment = [ | ||
102 | "BORG_BASE_DIR=/var/lib/borg" | ||
103 | "BORG_CONFIG_DIR=/var/lib/borg/config" | ||
104 | "BORG_CACHE_DIR=/var/lib/borg/cache" | ||
105 | "BORG_SECURITY_DIR=/var/lib/borg/security" | ||
106 | "BORG_KEYS_DIR=/var/lib/borg/keys" | ||
107 | ] | ||
108 | ++ optional cfg.unknownUnencryptedRepoAccessOk "BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes" | ||
109 | ++ optional cfg.hostnameIsUnique "BORG_HOSTNAME_IS_UNIQUE=yes" | ||
110 | ++ optional (!(isNull cfg.sshConfig)) "BORG_RSH=\"${pkgs.openssh}/bin/ssh -F ${pkgs.writeText "config" cfg.sshConfig}\"" | ||
111 | ++ optional (!(isNull cfg.keyfile)) "BORG_KEY_FILE=${cfg.keyfile}"; | ||
112 | RuntimeDirectory = "zfssnap-prune"; | ||
113 | }; | ||
114 | }; | ||
115 | }; | ||
116 | } | ||
diff --git a/modules/zfssnap/default.nix b/modules/zfssnap/default.nix deleted file mode 100644 index 23041c36..00000000 --- a/modules/zfssnap/default.nix +++ /dev/null | |||
@@ -1,113 +0,0 @@ | |||
1 | { config, pkgs, lib, flakeInputs, ... }: | ||
2 | |||
3 | with lib; | ||
4 | |||
5 | let | ||
6 | zfssnap = flakeInputs.mach-nix.lib.${config.nixpkgs.system}.buildPythonPackage rec { | ||
7 | pname = "zfssnap"; | ||
8 | src = ./zfssnap; | ||
9 | version = "0.0.0"; | ||
10 | ignoreDataOutdated = true; | ||
11 | |||
12 | requirements = '' | ||
13 | pyxdg | ||
14 | pytimeparse | ||
15 | python-dateutil | ||
16 | ''; | ||
17 | postInstall = '' | ||
18 | wrapProgram $out/bin/zfssnap \ | ||
19 | --prefix PATH : ${makeBinPath [config.boot.zfs.package]} | ||
20 | ''; | ||
21 | }; | ||
22 | |||
23 | cfg = config.services.zfssnap; | ||
24 | in { | ||
25 | options = { | ||
26 | services.zfssnap = { | ||
27 | enable = mkEnableOption "zfssnap service"; | ||
28 | |||
29 | config = mkOption { | ||
30 | type = types.submodule { | ||
31 | options = { | ||
32 | keep = mkOption { | ||
33 | type = with types; attrsOf str; | ||
34 | default = { | ||
35 | within = "15m"; | ||
36 | "5m" = "48"; | ||
37 | "15m" = "32"; | ||
38 | hourly = "48"; | ||
39 | "4h" = "24"; | ||
40 | "12h" = "12"; | ||
41 | daily = "62"; | ||
42 | halfweekly = "32"; | ||
43 | weekly = "24"; | ||
44 | monthly = "-1"; | ||
45 | }; | ||
46 | }; | ||
47 | exec = mkOption { | ||
48 | type = with types; attrsOf str; | ||
49 | default = {}; | ||
50 | }; | ||
51 | }; | ||
52 | }; | ||
53 | }; | ||
54 | |||
55 | snapInterval = mkOption { | ||
56 | type = types.str; | ||
57 | default = "*-*-* *:00/5:00"; | ||
58 | }; | ||
59 | |||
60 | verbosity = mkOption { | ||
61 | type = types.int; | ||
62 | default = 2; | ||
63 | }; | ||
64 | |||
65 | extraPruneArgs = mkOption { | ||
66 | type = with types; listOf str; | ||
67 | default = []; | ||
68 | }; | ||
69 | extraAutosnapArgs = mkOption { | ||
70 | type = with types; listOf str; | ||
71 | default = []; | ||
72 | }; | ||
73 | }; | ||
74 | }; | ||
75 | |||
76 | config = mkIf cfg.enable { | ||
77 | systemd.services."zfssnap" = { | ||
78 | description = "Create automatic ZFS snapshots"; | ||
79 | after = [ "zfs-import.target" ]; | ||
80 | wants = [ "zfssnap-prune.service" ]; | ||
81 | before = [ "zfssnap-prune.service" ]; | ||
82 | serviceConfig = { | ||
83 | Type = "oneshot"; | ||
84 | ExecStart = "${zfssnap}/bin/zfssnap --verbosity=${toString cfg.verbosity} autosnap ${escapeShellArgs cfg.extraAutosnapArgs}"; | ||
85 | |||
86 | LogRateLimitIntervalSec = 0; | ||
87 | }; | ||
88 | }; | ||
89 | systemd.services."zfssnap-prune" = { | ||
90 | description = "Prune automatic ZFS snapshots"; | ||
91 | after = [ "zfs-import.target" "zfssnap.service" ]; | ||
92 | serviceConfig = { | ||
93 | Type = "oneshot"; | ||
94 | ExecStart = let | ||
95 | mkSectionName = name: strings.escape [ "[" "]" ] (strings.toUpper name); | ||
96 | zfssnapConfig = generators.toINI { inherit mkSectionName; } cfg.config; | ||
97 | in "${zfssnap}/bin/zfssnap --verbosity=${toString cfg.verbosity} prune --config=${pkgs.writeText "zfssnap.ini" zfssnapConfig} ${escapeShellArgs cfg.extraPruneArgs}"; | ||
98 | |||
99 | LogRateLimitIntervalSec = 0; | ||
100 | }; | ||
101 | }; | ||
102 | |||
103 | systemd.timers."zfssnap" = { | ||
104 | wantedBy = ["timers.target"]; | ||
105 | timerConfig = { | ||
106 | OnCalendar = cfg.snapInterval; | ||
107 | Persistent = true; | ||
108 | }; | ||
109 | }; | ||
110 | |||
111 | environment.systemPackages = [zfssnap]; | ||
112 | }; | ||
113 | } | ||
diff --git a/modules/zfssnap/zfssnap/setup.py b/modules/zfssnap/zfssnap/setup.py deleted file mode 100644 index 6c58757d..00000000 --- a/modules/zfssnap/zfssnap/setup.py +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | from setuptools import setup | ||
2 | |||
3 | setup(name='zfssnap', | ||
4 | packages=['zfssnap'], | ||
5 | entry_points={ | ||
6 | 'console_scripts': [ | ||
7 | 'zfssnap=zfssnap.__main__:main', | ||
8 | ], | ||
9 | } | ||
10 | ) | ||
diff --git a/modules/zfssnap/zfssnap/zfssnap/__main__.py b/modules/zfssnap/zfssnap/zfssnap/__main__.py deleted file mode 100644 index 2ff8b309..00000000 --- a/modules/zfssnap/zfssnap/zfssnap/__main__.py +++ /dev/null | |||
@@ -1,438 +0,0 @@ | |||
1 | import csv | ||
2 | import subprocess | ||
3 | import io | ||
4 | from distutils.util import strtobool | ||
5 | from datetime import datetime, timezone, timedelta | ||
6 | from dateutil.tz import gettz, tzutc | ||
7 | import pytimeparse | ||
8 | import argparse | ||
9 | import re | ||
10 | |||
11 | import sys | ||
12 | |||
13 | import logging | ||
14 | |||
15 | import shlex | ||
16 | |||
17 | from collections import defaultdict, OrderedDict, deque, namedtuple | ||
18 | |||
19 | import configparser | ||
20 | from xdg import BaseDirectory | ||
21 | |||
22 | from functools import cache | ||
23 | |||
24 | from math import floor | ||
25 | |||
26 | import asyncio | ||
27 | |||
28 | from dataclasses import dataclass | ||
29 | |||
30 | |||
# Ordered map of retention-rule names to functions that bucket a datetime into
# the period that rule counts by.  Ordered finest to coarsest; rules that
# return a tuple (e.g. "5m", "4h", "halfweekly") pair a coarse strftime bucket
# with a rounded-down sub-bucket so each tuple identifies one period.
TIME_PATTERNS = OrderedDict([
    ("secondly", lambda t: t.strftime('%Y-%m-%d %H:%M:%S')),
    ("minutely", lambda t: t.strftime('%Y-%m-%d %H:%M')),
    ("5m", lambda t: (t.strftime('%Y-%m-%d %H'), floor(t.minute / 5) * 5)),
    ("15m", lambda t: (t.strftime('%Y-%m-%d %H'), floor(t.minute / 15) * 15)),
    ("hourly", lambda t: t.strftime('%Y-%m-%d %H')),
    ("4h", lambda t: (t.strftime('%Y-%m-%d'), floor(t.hour / 4) * 4)),
    ("12h", lambda t: (t.strftime('%Y-%m-%d'), floor(t.hour / 12) * 12)),
    ("daily", lambda t: t.strftime('%Y-%m-%d')),
    # %G-%V is the ISO year/week; %u (ISO weekday 1-7) split at 4 gives two
    # half-week buckets.
    ("halfweekly", lambda t: (t.strftime('%G-%V'), floor(int(t.strftime('%u')) / 4) * 4)),
    ("weekly", lambda t: t.strftime('%G-%V')),
    ("monthly", lambda t: t.strftime('%Y-%m')),
    ("yearly", lambda t: t.strftime('%Y')),
])

# ZFS user property on filesystems/volumes that opts them in/out of autosnap.
PROP_DO_AUTO_SNAPSHOT = 'li.yggdrasil:auto-snapshot'
# ZFS user property stamped on snapshots created by this tool.
PROP_IS_AUTO_SNAPSHOT = 'li.yggdrasil:is-auto-snapshot'
48 | |||
@dataclass(eq=True, order=True, frozen=True)
class Snap:
    # One ZFS snapshot: full name ("dataset@suffix") plus its creation time
    # (timezone-aware, UTC).  Frozen/ordered so instances are hashable and
    # sortable by (name, creation).
    name: str
    creation: datetime
53 | |||
@dataclass(eq=True, order=True, frozen=True)
class KeptBecause:
    # Records why prune() retained a snapshot: the rule name, the 1-based
    # count of keeps for that rule on dataset `base`, and the matched period
    # bucket (None for rules without one, e.g. 'within').
    rule: str
    ix: int
    base: str
    period: str
60 | |||
61 | |||
@cache
def _now():
    # @cache pins the first call's value, so every caller within one process
    # run sees the same timestamp (all snapshots of a run share one suffix).
    return datetime.now(timezone.utc)
65 | |||
66 | def _snap_name(item, time=_now()): | ||
67 | suffix = re.sub(r'\+00:00$', r'Z', time.isoformat(timespec='seconds')) | ||
68 | return f'{item}@{suffix}' | ||
69 | |||
def _log_cmd(*args):
    """Debug-log an external command in copy-pasteable shell-quoted form."""
    quoted = ' '.join(shlex.quote(arg) for arg in args)
    logger.debug('Running command: %s', quoted)
73 | |||
def _get_items():
    """Return {dataset: bool} for every filesystem/volume carrying the
    li.yggdrasil:auto-snapshot user property.

    Datasets without the property ('-') or with an empty value are skipped.
    Raises ValueError on an unrecognised property value.
    """
    def parse_bool(value):
        # Replacement for distutils.util.strtobool (distutils is deprecated
        # and removed in Python 3.12); same spellings, same ValueError.
        v = value.lower()
        if v in ('y', 'yes', 't', 'true', 'on', '1'):
            return True
        if v in ('n', 'no', 'f', 'false', 'off', '0'):
            return False
        raise ValueError(f'invalid truth value {value!r}')

    items = {}

    args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', '-t', 'filesystem,volume', PROP_DO_AUTO_SNAPSHOT]
    _log_cmd(*args)
    with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
        text_stdout = io.TextIOWrapper(proc.stdout)
        # -H output is tab-separated with no quoting.
        reader = csv.DictReader(text_stdout, fieldnames=['name', 'value'], delimiter='\t', quoting=csv.QUOTE_NONE)
        Row = namedtuple('Row', reader.fieldnames)
        for row in [Row(**data) for data in reader]:
            if not row.value or row.value == '-':
                continue

            items[row.name] = parse_bool(row.value)

    return items
90 | |||
def _get_snaps(only_auto=True):
    """Return {dataset: [Snap, ...]} for all snapshots on the system.

    With *only_auto* (the default) only snapshots whose is-auto-snapshot
    property parses as true are included.
    """
    def parse_bool(value):
        # Replacement for distutils.util.strtobool (distutils is deprecated
        # and removed in Python 3.12); same spellings, same ValueError.
        v = value.lower()
        if v in ('y', 'yes', 't', 'true', 'on', '1'):
            return True
        if v in ('n', 'no', 'f', 'false', 'off', '0'):
            return False
        raise ValueError(f'invalid truth value {value!r}')

    snapshots = defaultdict(list)
    args = ['zfs', 'list', '-H', '-p', '-t', 'snapshot', '-o', f'name,{PROP_IS_AUTO_SNAPSHOT},creation']
    _log_cmd(*args)
    with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
        text_stdout = io.TextIOWrapper(proc.stdout)
        reader = csv.DictReader(text_stdout, fieldnames=['name', 'is_auto_snapshot', 'timestamp'], delimiter='\t', quoting=csv.QUOTE_NONE)
        Row = namedtuple('Row', reader.fieldnames)
        for row in [Row(**data) for data in reader]:
            if only_auto and not parse_bool(row.is_auto_snapshot):
                continue

            base_name, _, _ = row.name.rpartition('@')
            # `creation` is a Unix timestamp thanks to `zfs list -p`.
            creation = datetime.fromtimestamp(int(row.timestamp), timezone.utc)
            snapshots[base_name].append(Snap(name=row.name, creation=creation))

    return snapshots
108 | |||
def prune(config, dry_run, keep_newest, do_exec, exec_newest):
    """Prune automatic snapshots according to *config* (a ConfigParser with
    `timedelta`/`timezone` converters).

    Two phases over all auto-snapshots:
      1. EXEC (optional): select snapshots per [EXEC] time-pattern counts,
         drop those an external check command reports as handled, run the
         exec command on the rest, and keep anything still pending.
      2. KEEP: retain snapshots inside the [KEEP] 'within' window and per
         time-pattern counts; everything unreferenced is destroyed
         (`zfs destroy -n` when *dry_run*).

    Fixes vs. original: logger.warn (deprecated alias) -> logger.warning.
    """
    do_exec = do_exec and 'EXEC' in config
    prune_timezone = config.gettimezone('KEEP', 'timezone', fallback=tzutc())
    logger.debug('prune timezone: %s', prune_timezone)

    items = _get_snaps()

    # kept_count[rule][dataset] numbers the keeps per rule; kept_because maps
    # snapshot name -> deque of KeptBecause (a snapshot may match many rules).
    kept_count = defaultdict(lambda: defaultdict(lambda: 0))
    kept_because = OrderedDict()
    def keep_because(base, snap, rule, period=None):
        nonlocal kept_count, kept_because
        kept_count[rule][base] += 1
        if snap not in kept_because:
            kept_because[snap] = deque()
        kept_because[snap].append(KeptBecause(rule=rule, ix=kept_count[rule][base], base=base, period=period))

    exec_candidates = set()
    if do_exec:
        exec_timezone = config.gettimezone('EXEC', 'timezone', fallback=prune_timezone)
        logger.debug('exec timezone: %s', exec_timezone)

        for rule, pattern in TIME_PATTERNS.items():
            desired_count = config.getint('EXEC', rule, fallback=0)

            for base, snaps in items.items():
                # Bucket snapshots into periods; pick one per period until the
                # desired count is reached.
                periods = OrderedDict()

                for snap in sorted(snaps, key=lambda snap: snap.creation, reverse=exec_newest):
                    period = pattern(snap.creation.astimezone(exec_timezone))
                    if period not in periods:
                        periods[period] = deque()
                    periods[period].append(snap)

                to_exec = desired_count
                ordered_periods = periods.items() if exec_newest else reversed(periods.items())
                for period, period_snaps in ordered_periods:
                    if to_exec == 0:
                        break

                    for snap in period_snaps:
                        exec_candidates.add(snap)
                        logger.debug('‘%s’ is exec candidate', snap.name)
                        to_exec -= 1
                        break

                if to_exec > 0:
                    logger.debug('Missing %d to fulfill exec %s=%d for ‘%s’', to_exec, rule, desired_count, base)

        check_cmd = config.get('EXEC', 'check', fallback=None)
        if check_cmd:
            logger.debug('exec_candidates=%s', exec_candidates)
            already_execed = set()
            for snap in exec_candidates:
                logger.debug('checking for ‘%s’...', snap.name)
                args = []
                args += shlex.split(check_cmd)
                args += [snap.name]
                _log_cmd(*args)
                check_res = subprocess.run(args)
                # Check-command exit codes: 0 = already execed, 124 = ignore,
                # 125 = ignore but keep the snapshot, 126 = still needs exec,
                # anything else = hard error.
                if check_res.returncode == 0:
                    already_execed.add(snap)
                    logger.debug('‘%s’ already execed', snap.name)
                elif check_res.returncode == 124:
                    already_execed.add(snap)
                    logger.warning('‘%s’ ignored', snap.name)
                    pass
                elif check_res.returncode == 125:
                    already_execed.add(snap)
                    logger.info('‘%s’ ignored but specified for keeping, doing so...', snap.name)
                    base_name, _, _ = snap.name.rpartition('@')
                    keep_because(base_name, snap.name, 'exec-ignored')
                elif check_res.returncode == 126:
                    logger.debug('‘%s’ to exec', snap.name)
                else:
                    check_res.check_returncode()
            exec_candidates -= already_execed

        exec_cmd = config.get('EXEC', 'cmd', fallback=None)
        exec_count = config.getint('EXEC', 'count', fallback=1)
        if exec_cmd:
            execed = set()
            for snap in sorted(exec_candidates, key=lambda snap: snap.creation):
                # exec_count <= 0 means unlimited.
                if exec_count > 0 and len(execed) >= exec_count:
                    logger.debug('exec_count of %d reached', exec_count)
                    break

                logger.info('execing for ‘%s’...', snap.name)
                args = []
                args += shlex.split(exec_cmd)
                args += [snap.name]
                _log_cmd(*args)
                p = subprocess.run(args)
                if p.returncode == 125:
                    # Exec command signalled a dry run; keep snapshot for next time.
                    logger.warning('got dry-run returncode for ‘%s’, keeping...', snap.name)
                    base_name, _, _ = snap.name.rpartition('@')
                    keep_because(base_name, snap.name, 'exec-dryrun')
                    pass
                else:
                    p.check_returncode()
                    execed.add(snap)

            exec_candidates -= execed

        # Whatever was selected but not yet execed must survive pruning.
        for candidate in exec_candidates:
            base_name, _, _ = candidate.name.rpartition('@')
            keep_because(base_name, candidate.name, 'exec-candidate')

    within = config.gettimedelta('KEEP', 'within')
    if within > timedelta(seconds=0):
        for base, snaps in items.items():
            # Cutoff is measured from the newest snapshot, not wall-clock now.
            time_ref = max(snaps, key=lambda snap: snap.creation, default=None)
            if not time_ref:
                logger.warning('Nothing to keep for ‘%s’', base)
                continue

            logger.info('Using ‘%s’ as time reference for ‘%s’', time_ref.name, base)
            within_cutoff = time_ref.creation - within

            for snap in snaps:
                if snap.creation >= within_cutoff:
                    keep_because(base, snap.name, 'within')
    else:
        logger.warning('Skipping rule ‘within’ since retention period is zero')

    for rule, pattern in TIME_PATTERNS.items():
        desired_count = config.getint('KEEP', rule, fallback=0)

        for base, snaps in items.items():
            periods = OrderedDict()

            for snap in sorted(snaps, key=lambda snap: snap.creation, reverse=keep_newest):
                period = pattern(snap.creation.astimezone(prune_timezone))
                if period not in periods:
                    periods[period] = deque()
                periods[period].append(snap)

            to_keep = desired_count
            ordered_periods = periods.items() if keep_newest else reversed(periods.items())
            for period, period_snaps in ordered_periods:
                if to_keep == 0:
                    break

                # Keep exactly one snapshot per period bucket.
                for snap in period_snaps:
                    keep_because(base, snap.name, rule, period=period)
                    to_keep -= 1
                    break

            if to_keep > 0:
                logger.debug('Missing %d to fulfill prune %s=%d for ‘%s’', to_keep, rule, desired_count, base)

    for snap, reasons in kept_because.items():
        logger.info('Keeping ‘%s’ because: %s', snap, ', '.join(map(str, reasons)))
    all_snaps = {snap.name for _, snaps in items.items() for snap in snaps}
    to_destroy = all_snaps - {*kept_because}
    if not to_destroy:
        logger.info('Nothing to prune')

    for snap in sorted(to_destroy):
        args = ['zfs', 'destroy']
        if dry_run:
            args += ['-n']
        args += [snap]
        _log_cmd(*args)
        subprocess.run(args, check=True)
        if dry_run:
            logger.info('Would have pruned ‘%s’', snap)
        else:
            logger.info('Pruned ‘%s’', snap)
277 | |||
def rename(snapshots, destroy=False, set_is_auto=False):
    """Rename each given snapshot to the canonical _snap_name() form derived
    from its ZFS `creation` property.

    If two snapshots canonicalise to the same name (created within the same
    second), only the first is renamed; the duplicate is destroyed when
    *destroy* is set, otherwise skipped.  With *set_is_auto* the renamed
    snapshot is additionally marked with the is-auto-snapshot property.
    """
    args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', 'creation', *snapshots]
    _log_cmd(*args)
    renamed_to = set()
    with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
        text_stdout = io.TextIOWrapper(proc.stdout)
        # -H -p output: tab-separated "name<TAB>unix-timestamp", no quoting.
        reader = csv.DictReader(text_stdout, fieldnames=['name', 'timestamp'], delimiter='\t', quoting=csv.QUOTE_NONE)
        Row = namedtuple('Row', reader.fieldnames)
        for row in [Row(**data) for data in reader]:
            creation = datetime.fromtimestamp(int(row.timestamp), timezone.utc)
            base_name, _, _ = row.name.rpartition('@')
            new_name = _snap_name(base_name, time=creation)
            if new_name == row.name:
                logger.debug('Not renaming ‘%s’ since name is already correct', row.name)
                continue

            if new_name in renamed_to:
                # Collision with a snapshot already renamed in this run.
                if destroy:
                    logger.warning('Destroying ‘%s’ since ‘%s’ was already renamed to', row.name, new_name)
                    args = ['zfs', 'destroy', row.name]
                    _log_cmd(*args)
                    subprocess.run(args, check=True)
                else:
                    logger.info('Skipping ‘%s’ since ‘%s’ was already renamed to', row.name, new_name)

                continue

            logger.info('Renaming ‘%s’ to ‘%s’', row.name, new_name)
            args = ['zfs', 'rename', row.name, new_name]
            _log_cmd(*args)
            subprocess.run(args, check=True)
            renamed_to.add(new_name)

            if set_is_auto:
                logger.info('Setting is-auto-snapshot on ‘%s’', new_name)
                args = ['zfs', 'set', f'{PROP_IS_AUTO_SNAPSHOT}=true', new_name]
                _log_cmd(*args)
                subprocess.run(args, check=True)
316 | |||
def autosnap():
    """Create one auto-snapshot per opted-in dataset (one `zfs snapshot`
    invocation per pool), then normalise snapshot names via rename()."""
    items = _get_items()

    all_snap_names = set()
    # NOTE(review): do_snapshot is async but uses blocking subprocess.run, so
    # the asyncio.gather below runs batches sequentially — confirm intent.
    async def do_snapshot(*snap_items, recursive=False):
        nonlocal items, all_snap_names
        snap_names = {_snap_name(item) for item in snap_items if items[item]}
        if not snap_names:
            # BUGFIX: every dataset in this batch has auto-snapshot disabled;
            # `zfs snapshot` with no snapshot arguments would fail.
            return
        if recursive:
            # -r snapshots all descendants; record every matching child name.
            for snap_item in snap_items:
                all_snap_names |= {_snap_name(item) for item in items if item.startswith(snap_item)}
        else:
            all_snap_names |= snap_names

        args = ['zfs', 'snapshot', '-o', f'{PROP_IS_AUTO_SNAPSHOT}=true']
        if recursive:
            args += ['-r']
        args += snap_names

        _log_cmd(*args)
        subprocess.run(args, check=True)

    # Group datasets by pool so each pool is snapshotted atomically.
    pool_items = defaultdict(set)
    for item in items:
        pool, _, _ = item.partition('/')
        pool_items[pool].add(item)

    tasks = []
    for snap_items in pool_items.values():
        tasks.append(do_snapshot(*snap_items))
    if not tasks:
        logger.warning('No snapshots to create')
    else:
        async def run_tasks():
            await asyncio.gather(*tasks)
        asyncio.run(run_tasks())
        for snap in all_snap_names:
            logger.info('Created ‘%s’', snap)
        if all_snap_names:
            rename(snapshots=all_snap_names)
356 | |||
def main():
    """CLI entry point: set up logging, parse arguments, dispatch to
    autosnap (default) / rename / prune.

    Fixes vs. original: two exception messages passed %-style args that
    exceptions never interpolate; both now use f-strings.
    """
    global logger
    logger = logging.getLogger(__name__)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter( logging.Formatter('[%(levelname)s](%(name)s): %(message)s') )
    if sys.stderr.isatty():
        # Interactive terminal: include timestamps (the journal adds its own).
        console_handler.setFormatter( logging.Formatter('%(asctime)s [%(levelname)s](%(name)s): %(message)s') )
    logger.addHandler(console_handler)

    # log uncaught exceptions
    def log_exceptions(type, value, tb):
        global logger

        logger.error(value)
        sys.__excepthook__(type, value, tb) # calls default excepthook

    sys.excepthook = log_exceptions

    parser = argparse.ArgumentParser(prog='zfssnap')
    parser.add_argument('--verbosity', dest='log_level', action='append', type=int)
    parser.add_argument('--verbose', '-v', dest='log_level', action='append_const', const=1)
    parser.add_argument('--quiet', '-q', dest='log_level', action='append_const', const=-1)
    subparsers = parser.add_subparsers()
    parser.set_defaults(cmd=autosnap)
    autosnap_parser = subparsers.add_parser('autosnap')
    autosnap_parser.set_defaults(cmd=autosnap)
    rename_parser = subparsers.add_parser('rename')
    rename_parser.add_argument('snapshots', nargs='+')
    rename_parser.add_argument('--destroy', action='store_true', default=False)
    rename_parser.add_argument('--set-is-auto', action='store_true', default=False)
    rename_parser.set_defaults(cmd=rename)
    prune_parser = subparsers.add_parser('prune')
    prune_parser.add_argument('--config', '-c', dest='config_files', nargs='*', default=list())
    prune_parser.add_argument('--dry-run', '-n', action='store_true', default=False)
    prune_parser.add_argument('--keep-newest', action='store_true', default=False)
    prune_parser.add_argument('--exec-newest', action='store_true', default=False)
    prune_parser.add_argument('--no-exec', dest='do_exec', action='store_false', default=True)
    prune_parser.set_defaults(cmd=prune)
    args = parser.parse_args()


    LOG_LEVELS = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]
    DEFAULT_LOG_LEVEL = logging.ERROR
    log_level = LOG_LEVELS.index(DEFAULT_LOG_LEVEL)

    # Each -v/-q (or explicit --verbosity N) shifts the level; clamp to range.
    for adjustment in args.log_level or ():
        log_level = min(len(LOG_LEVELS) - 1, max(log_level - adjustment, 0))
    logger.setLevel(LOG_LEVELS[log_level])

    cmdArgs = {}
    # Forward only the options the selected subcommand actually declared.
    for copy in {'snapshots', 'dry_run', 'destroy', 'keep_newest', 'exec_newest', 'set_is_auto', 'do_exec'}:
        if copy in vars(args):
            cmdArgs[copy] = vars(args)[copy]
    if 'config_files' in vars(args):
        def convert_timedelta(secs_str):
            secs = pytimeparse.parse(secs_str)
            if secs is None:
                # BUGFIX: message is now actually interpolated.
                raise ValueError(f'Could not parse timedelta expression ‘{secs_str}’')
            return timedelta(seconds=secs)
        config = configparser.ConfigParser(converters={
            'timedelta': convert_timedelta,
            'timezone': gettz
        })
        # Explicit --config paths win; otherwise search XDG config dirs.
        search_files = args.config_files if args.config_files else [*BaseDirectory.load_config_paths('zfssnap.ini')]
        read_files = config.read(search_files)

        def format_config_files(files):
            if not files:
                return 'no files'
            return ', '.join(map(lambda file: f'‘{file}’', files))

        if not read_files:
            # BUGFIX: message is now actually interpolated.
            raise Exception(f'Found no config files. Tried: {format_config_files(search_files)}')

        logger.debug('Read following config files: %s', format_config_files(read_files))

        cmdArgs['config'] = config

    args.cmd(**cmdArgs)
436 | |||
if __name__ == '__main__':
    # main() returns None on success; sys.exit(None) exits with status 0.
    sys.exit(main())