diff options
| -rw-r--r-- | hosts/vidhar/borg/borgsnap/borgsnap/__main__.py | 355 | ||||
| -rw-r--r-- | hosts/vidhar/borg/default.nix | 6 | ||||
| -rw-r--r-- | hosts/vidhar/network/bifrost/default.nix | 18 | ||||
| -rw-r--r-- | modules/yggdrasil-wg/default.nix | 4 | ||||
| -rw-r--r-- | modules/zfssnap/default.nix | 42 | ||||
| -rw-r--r-- | modules/zfssnap/zfssnap/setup.py | 10 | ||||
| -rw-r--r-- | modules/zfssnap/zfssnap/zfssnap/__main__.py (renamed from modules/zfssnap/zfssnap.py) | 130 |
7 files changed, 399 insertions, 166 deletions
diff --git a/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py b/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py index e93e6a60..cd2f1ad2 100644 --- a/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py +++ b/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py | |||
| @@ -1,12 +1,11 @@ | |||
| 1 | import argparse | 1 | import argparse |
| 2 | import os, sys, signal | 2 | import os, sys, signal, io |
| 3 | from pyprctl import cap_permitted, cap_inheritable, cap_effective, cap_ambient, Cap | 3 | from pyprctl import CapState, Cap, cap_ambient_raise, cap_ambient_is_set, set_keepcaps |
| 4 | from pwd import getpwnam | 4 | from pwd import getpwnam |
| 5 | 5 | ||
| 6 | from datetime import datetime, timezone | 6 | from datetime import datetime, timezone |
| 7 | from dateutil.parser import isoparse | 7 | from dateutil.parser import isoparse |
| 8 | 8 | ||
| 9 | from xdg import xdg_runtime_dir | ||
| 10 | import unshare | 9 | import unshare |
| 11 | from tempfile import TemporaryDirectory | 10 | from tempfile import TemporaryDirectory |
| 12 | 11 | ||
| @@ -14,6 +13,9 @@ import logging | |||
| 14 | 13 | ||
| 15 | import json | 14 | import json |
| 16 | import subprocess | 15 | import subprocess |
| 16 | import csv | ||
| 17 | from collections import namedtuple | ||
| 18 | from distutils.util import strtobool | ||
| 17 | 19 | ||
| 18 | import pathlib | 20 | import pathlib |
| 19 | from pathlib import Path | 21 | from pathlib import Path |
| @@ -22,21 +24,89 @@ from atomicwrites import atomic_write | |||
| 22 | 24 | ||
| 23 | from traceback import format_exc | 25 | from traceback import format_exc |
| 24 | 26 | ||
| 27 | from multiprocessing import Process, Manager | ||
| 28 | from contextlib import closing | ||
| 29 | |||
| 30 | from enum import Enum, auto | ||
| 31 | |||
| 32 | import select | ||
| 33 | import time | ||
| 34 | import math | ||
| 35 | |||
| 36 | |||
| 37 | PROP_DO_BORGSNAP = 'li.yggdrasil:borgsnap' | ||
| 38 | |||
| 39 | |||
| 40 | class DoValue(Enum): | ||
| 41 | BORGSNAP_DO = auto() | ||
| 42 | BORGSNAP_KEEP = auto() | ||
| 43 | BORGSNAP_DONT = auto() | ||
| 44 | |||
| 45 | @classmethod | ||
| 46 | def from_prop(cls, v: str): | ||
| 47 | if v.lower() == 'keep': | ||
| 48 | return cls.BORGSNAP_KEEP | ||
| 49 | |||
| 50 | return cls.BORGSNAP_DO if not v or bool(strtobool(v)) else cls.BORGSNAP_DONT | ||
| 51 | |||
| 52 | @classmethod | ||
| 53 | def merge(cls, v1, v2): | ||
| 54 | match (v1, v2): | ||
| 55 | case (cls.BORGSNAP_DONT, _): | ||
| 56 | return cls.BORGSNAP_DONT | ||
| 57 | case (_, cls.BORGSNAP_DONT): | ||
| 58 | return cls.BORGSNAP_DONT | ||
| 59 | case (cls.BORGSNAP_KEEP, _): | ||
| 60 | return cls.BORGSNAP_KEEP | ||
| 61 | case (_, cls.BORGSNAP_KEEP): | ||
| 62 | return cls.BORGSNAP_KEEP | ||
| 63 | case other: | ||
| 64 | return cls.BORGSNAP_DO | ||
| 65 | |||
| 66 | def returncode(self): | ||
| 67 | match self: | ||
| 68 | case self.__class__.BORGSNAP_DO: | ||
| 69 | return 126 | ||
| 70 | case self.__class__.BORGSNAP_KEEP: | ||
| 71 | return 125 | ||
| 72 | case self.__class__.BORGSNAP_DONT: | ||
| 73 | return 124 | ||
| 25 | 74 | ||
| 26 | borg_pwd = getpwnam('borg') | 75 | borg_pwd = getpwnam('borg') |
| 27 | 76 | ||
| 28 | def as_borg(caps=set(), cwd=None): | 77 | def as_borg(caps=set()): |
| 29 | if caps: | 78 | global logger |
| 30 | cap_permitted.add(*caps) | 79 | |
| 31 | cap_inheritable.add(*caps) | 80 | try: |
| 32 | cap_effective.add(*caps) | 81 | if caps: |
| 33 | cap_ambient.add(*caps) | 82 | c_state = CapState.get_current() |
| 83 | c_state.permitted.add(*caps) | ||
| 84 | c_state.set_current() | ||
| 85 | |||
| 86 | logger.debug("before setgid/setuid: cap_permitted=%s", CapState.get_current().permitted) | ||
| 87 | |||
| 88 | set_keepcaps(True) | ||
| 89 | |||
| 90 | os.setgid(borg_pwd.pw_gid) | ||
| 91 | os.setuid(borg_pwd.pw_uid) | ||
| 34 | 92 | ||
| 35 | os.setgid(borg_pwd.pw_gid) | 93 | if caps: |
| 36 | os.setuid(borg_pwd.pw_uid) | 94 | logger.debug("after setgid/setuid: cap_permitted=%s", CapState.get_current().permitted) |
| 37 | 95 | ||
| 38 | if cwd is not None: | 96 | c_state = CapState.get_current() |
| 39 | os.chdir(cwd) | 97 | c_state.permitted = caps.copy() |
| 98 | c_state.inheritable.add(*caps) | ||
| 99 | c_state.set_current() | ||
| 100 | |||
| 101 | logger.debug("cap_permitted=%s", CapState.get_current().permitted) | ||
| 102 | logger.debug("cap_inheritable=%s", CapState.get_current().inheritable) | ||
| 103 | |||
| 104 | for cap in caps: | ||
| 105 | cap_ambient_raise(cap) | ||
| 106 | logger.debug("cap_ambient[%s]=%s", cap, cap_ambient_is_set(cap)) | ||
| 107 | except Exception: | ||
| 108 | logger.error(format_exc()) | ||
| 109 | raise | ||
| 40 | 110 | ||
| 41 | 111 | ||
| 42 | def _archive_name(snapshot, target, archive_prefix): | 112 | def _archive_name(snapshot, target, archive_prefix): |
| @@ -50,13 +120,15 @@ def _archive_basename(snapshot, archive_prefix): | |||
| 50 | return archive_prefix + base_name.replace('-', '--').replace('/', '-') | 120 | return archive_prefix + base_name.replace('-', '--').replace('/', '-') |
| 51 | 121 | ||
| 52 | def check(*, snapshot, target, archive_prefix, cache_file): | 122 | def check(*, snapshot, target, archive_prefix, cache_file): |
| 123 | global logger | ||
| 124 | |||
| 53 | archives = None | 125 | archives = None |
| 54 | if cache_file: | 126 | if cache_file: |
| 55 | logger.debug('Trying cache...') | 127 | logger.debug('Trying cache...') |
| 56 | try: | 128 | try: |
| 57 | with open(cache_file, mode='r', encoding='utf-8') as fp: | 129 | with open(cache_file, mode='r', encoding='utf-8') as fp: |
| 58 | archives = set(json.load(fp)) | 130 | archives = set(json.load(fp)) |
| 59 | logger.info('Loaded archive list from cache') | 131 | logger.debug('Loaded archive list from cache') |
| 60 | except FileNotFoundError: | 132 | except FileNotFoundError: |
| 61 | pass | 133 | pass |
| 62 | 134 | ||
| @@ -72,76 +144,165 @@ def check(*, snapshot, target, archive_prefix, cache_file): | |||
| 72 | # logger.debug(f'archives: {archives}') | 144 | # logger.debug(f'archives: {archives}') |
| 73 | _, _, archive_name = _archive_name(snapshot, target, archive_prefix).partition('::') | 145 | _, _, archive_name = _archive_name(snapshot, target, archive_prefix).partition('::') |
| 74 | if archive_name in archives: | 146 | if archive_name in archives: |
| 75 | logger.info(f'{archive_name} found') | 147 | logger.info('‘%s’ found', archive_name) |
| 76 | return 0 | 148 | return 0 |
| 77 | else: | 149 | else: |
| 78 | logger.info(f'{archive_name} not found') | 150 | logger.info('‘%s’ not found', archive_name) |
| 79 | return 126 | 151 | |
| 152 | logger.debug('Checking %s for ‘%s’...', PROP_DO_BORGSNAP, snapshot) | ||
| 153 | intent = DoValue.BORGSNAP_DO | ||
| 154 | p = subprocess.run(['zfs', 'get', '-H', '-p', '-o', 'name,value', PROP_DO_BORGSNAP, snapshot], stdout=subprocess.PIPE, text=True, check=True) | ||
| 155 | reader = csv.DictReader(io.StringIO(p.stdout), fieldnames=['name', 'value'], delimiter='\t', quoting=csv.QUOTE_NONE) | ||
| 156 | Row = namedtuple('Row', reader.fieldnames) | ||
| 157 | for row in [Row(**data) for data in reader]: | ||
| 158 | if not row.value or row.value == '-': | ||
| 159 | continue | ||
| 160 | |||
| 161 | logger.debug('%s=%s (parsed as %s) for ‘%s’...', PROP_DO_BORGSNAP, row.value, DoValue.from_prop(row.value), row.name) | ||
| 162 | intent = DoValue.merge(intent, DoValue.from_prop(row.value)) | ||
| 163 | |||
| 164 | match intent: | ||
| 165 | case DoValue.BORGSNAP_DONT: | ||
| 166 | logger.warn('%s specifies to ignore, returning accordingly...', PROP_DO_BORGSNAP) | ||
| 167 | case DoValue.BORGSNAP_KEEP: | ||
| 168 | logger.info('%s specifies to ignore but keep, returning accordingly...', PROP_DO_BORGSNAP) | ||
| 169 | case other: | ||
| 170 | pass | ||
| 171 | |||
| 172 | return intent.returncode() | ||
| 80 | 173 | ||
| 81 | def create(*, snapshot, target, archive_prefix, dry_run): | 174 | def create(*, snapshot, target, archive_prefix, dry_run): |
| 175 | global logger | ||
| 176 | |||
| 82 | basename = _archive_basename(snapshot, archive_prefix) | 177 | basename = _archive_basename(snapshot, archive_prefix) |
| 83 | 178 | ||
| 84 | with TemporaryDirectory(prefix=f'borg-mount_{basename}_', dir=os.environ.get('RUNTIME_DIRECTORY')) as tmpdir: | 179 | def do_create(tmpdir_q): |
| 85 | child = os.fork() | 180 | global logger |
| 86 | if child == 0: | 181 | nonlocal basename, snapshot, target, archive_prefix, dry_run |
| 87 | unshare.unshare(unshare.CLONE_NEWNS) | 182 | |
| 88 | subprocess.run(['mount', '--make-rprivate', '/'], check=True) | 183 | tmpdir = tmpdir_q.get() |
| 89 | chroot = pathlib.Path(tmpdir) / 'chroot' | 184 | |
| 90 | upper = pathlib.Path(tmpdir) / 'upper' | 185 | unshare.unshare(unshare.CLONE_NEWNS) |
| 91 | work = pathlib.Path(tmpdir) / 'work' | 186 | subprocess.run(['mount', '--make-rprivate', '/'], check=True) |
| 92 | for path in [chroot,upper,work]: | 187 | chroot = pathlib.Path(tmpdir) / 'chroot' |
| 93 | path.mkdir() | 188 | upper = pathlib.Path(tmpdir) / 'upper' |
| 94 | subprocess.run(['mount', '-t', 'overlay', 'overlay', '-o', f'lowerdir=/,upperdir={upper},workdir={work}', chroot], check=True) | 189 | work = pathlib.Path(tmpdir) / 'work' |
| 95 | bindMounts = ['nix', 'run', 'run/secrets.d', 'run/wrappers', 'proc', 'dev', 'sys', pathlib.Path(os.path.expanduser('~')).relative_to('/')] | 190 | for path in [chroot,upper,work]: |
| 96 | if os.environ.get('BORG_BASE_DIR'): | 191 | path.mkdir() |
| 97 | bindMounts.append(pathlib.Path(os.environ['BORG_BASE_DIR']).relative_to('/')) | 192 | subprocess.run(['mount', '-t', 'overlay', 'overlay', '-o', f'lowerdir=/,upperdir={upper},workdir={work}', chroot], check=True) |
| 98 | if 'SSH_AUTH_SOCK' in os.environ: | 193 | bindMounts = ['nix', 'run', 'run/secrets.d', 'run/wrappers', 'proc', 'dev', 'sys', pathlib.Path(os.path.expanduser('~')).relative_to('/')] |
| 99 | bindMounts.append(pathlib.Path(os.environ['SSH_AUTH_SOCK']).parent.relative_to('/')) | 194 | if borg_base_dir := os.getenv('BORG_BASE_DIR'): |
| 100 | for bindMount in bindMounts: | 195 | bindMounts.append(pathlib.Path(borg_base_dir).relative_to('/')) |
| 101 | (chroot / bindMount).mkdir(parents=True,exist_ok=True) | 196 | if ssh_auth_sock := os.getenv('SSH_AUTH_SOCK'): |
| 102 | # print(*['mount', '--bind', pathlib.Path('/') / bindMount, chroot / bindMount], file=stderr) | 197 | bindMounts.append(pathlib.Path(ssh_auth_sock).parent.relative_to('/')) |
| 103 | subprocess.run(['mount', '--bind', pathlib.Path('/') / bindMount, chroot / bindMount], check=True) | 198 | for bindMount in bindMounts: |
| 104 | os.chroot(chroot) | 199 | (chroot / bindMount).mkdir(parents=True,exist_ok=True) |
| 105 | os.chdir('/') | 200 | subprocess.run(['mount', '--bind', pathlib.Path('/') / bindMount, chroot / bindMount], check=True) |
| 106 | dir = pathlib.Path('/borg') | 201 | |
| 107 | dir.mkdir(parents=True,exist_ok=True,mode=0o0750) | 202 | os.chroot(chroot) |
| 108 | os.chown(dir, borg_pwd.pw_uid, borg_pwd.pw_gid) | 203 | os.chdir('/') |
| 109 | try: | 204 | dir = pathlib.Path('/borg') |
| 205 | dir.mkdir(parents=True,exist_ok=True,mode=0o0750) | ||
| 206 | os.chown(dir, borg_pwd.pw_uid, borg_pwd.pw_gid) | ||
| 207 | |||
| 208 | base_name, _, _ = snapshot.rpartition('@') | ||
| 209 | type_val = subprocess.run(['zfs', 'get', '-H', '-p', '-o', 'value', 'type', base_name], stdout=subprocess.PIPE, text=True, check=True).stdout.strip() | ||
| 210 | match type_val: | ||
| 211 | case 'filesystem': | ||
| 110 | subprocess.run(['mount', '-t', 'zfs', '-o', 'ro', snapshot, dir], check=True) | 212 | subprocess.run(['mount', '-t', 'zfs', '-o', 'ro', snapshot, dir], check=True) |
| 111 | env = os.environ.copy() | 213 | case 'volume': |
| 112 | create_args = ['borg', | 214 | snapdev_val = subprocess.run(['zfs', 'get', '-H', '-p', '-o', 'value', 'snapdev', base_name], stdout=subprocess.PIPE, text=True, check=True).stdout.strip() |
| 113 | 'create', | 215 | try: |
| 114 | '--lock-wait=600', | 216 | if snapdev_val == 'hidden': |
| 115 | '--one-file-system', | 217 | subprocess.run(['zfs', 'set', 'snapdev=visible', base_name], check=True) |
| 116 | '--compression=auto,zstd,10', | 218 | subprocess.run(['mount', '-t', 'auto', '-o', 'ro', Path('/dev/zvol') / snapshot, dir], check=True) |
| 117 | '--chunker-params=10,23,16,4095', | 219 | finally: |
| 118 | '--files-cache=ctime,size', | 220 | if snapdev_val == 'hidden': |
| 119 | '--show-rc', | 221 | subprocess.run(['zfs', 'inherit', 'snapdev', base_name], check=True) |
| 120 | # '--remote-ratelimit=20480', | 222 | case other: |
| 121 | '--progress', | 223 | raise ValueError(f'‘{base_name}’ is of type ‘{type_val}’') |
| 122 | '--list', | 224 | |
| 123 | '--filter=AMEi-x?', | 225 | env = os.environ.copy() |
| 124 | '--stats' if not dry_run else '--dry-run' | 226 | create_args = ['borg', |
| 125 | ] | 227 | 'create', |
| 126 | _, _, ts = snapshot.rpartition('@') | 228 | '--lock-wait=600', |
| 127 | creation_time = isoparse(ts).astimezone(timezone.utc) | 229 | '--one-file-system', |
| 128 | create_args += [f'--timestamp={creation_time.strftime("%Y-%m-%dT%H:%M:%S")}'] | 230 | '--exclude-caches', |
| 129 | env['BORG_FILES_CACHE_SUFFIX'] = basename | 231 | '--keep-exclude-tags', |
| 130 | create_args += [_archive_name(snapshot, target, archive_prefix), '.'] | 232 | '--compression=auto,zstd,10', |
| 131 | print({'create_args': create_args, 'cwd': dir, 'env': env}, file=sys.stderr) | 233 | '--chunker-params=10,23,16,4095', |
| 132 | subprocess.run(create_args, stdin=subprocess.DEVNULL, env=env, preexec_fn=lambda: as_borg(caps={CAP.DAC_READ_SEARCH}, cwd=dir), check=True) | 234 | '--files-cache=ctime,size', |
| 133 | # subprocess.run(create_args, stdin=subprocess.DEVNULL, env=env, preexec_fn=lambda: None, cwd=dir, check=True) | 235 | '--show-rc', |
| 134 | finally: | 236 | # '--remote-ratelimit=20480', |
| 135 | subprocess.run(['umount', dir], check=True) | 237 | '--progress', |
| 136 | os._exit(0) | 238 | '--list', |
| 239 | '--filter=AMEi-x?', | ||
| 240 | '--stats' if not dry_run else '--dry-run', | ||
| 241 | ] | ||
| 242 | _, _, ts = snapshot.rpartition('@') | ||
| 243 | creation_time = isoparse(ts).astimezone(timezone.utc) | ||
| 244 | create_args += [f'--timestamp={creation_time.strftime("%Y-%m-%dT%H:%M:%S")}'] | ||
| 245 | env['BORG_FILES_CACHE_SUFFIX'] = basename | ||
| 246 | archive_name = _archive_name(snapshot, target, archive_prefix) | ||
| 247 | target_host, _, target_path = target.rpartition(':') | ||
| 248 | *parents_init, _ = list(Path(target_path).parents) | ||
| 249 | backup_patterns = [*(map(lambda p: Path('.backup') / f'{target_host}:{p}', [Path(target_path), *parents_init])), Path('.backup') / target_host, Path('.backup')] | ||
| 250 | for pattern_file in backup_patterns: | ||
| 251 | if (dir / pattern_file).is_file(): | ||
| 252 | logger.debug('Found backup patterns at ‘%s’', dir / pattern_file) | ||
| 253 | create_args += [f'--patterns-from={pattern_file}', archive_name] | ||
| 254 | break | ||
| 255 | elif (dir / pattern_file).exists(): | ||
| 256 | logger.warn('‘%s’ exists but is no file', dir / pattern_file) | ||
| 137 | else: | 257 | else: |
| 138 | while True: | 258 | logger.debug('No backup patterns exist, checked %s', list(map(lambda pattern_file: str(dir / pattern_file), backup_patterns))) |
| 139 | waitpid, waitret = os.wait() | 259 | create_args += [archive_name, '.'] |
| 140 | if waitret != 0: | 260 | logger.debug('%s', {'create_args': create_args, 'cwd': dir, 'env': env}) |
| 141 | sys.exit(waitret) | 261 | |
| 142 | if waitpid == child: | 262 | with subprocess.Popen(create_args, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, preexec_fn=lambda: as_borg(caps={Cap.DAC_READ_SEARCH}), cwd=dir, text=True) as proc: |
| 143 | break | 263 | proc_logger = logger.getChild('borg') |
| 144 | return 0 | 264 | |
| 265 | poll = select.poll() | ||
| 266 | poll.register(proc.stdout, select.POLLIN | select.POLLHUP) | ||
| 267 | poll.register(proc.stderr, select.POLLIN | select.POLLHUP) | ||
| 268 | pollc = 2 | ||
| 269 | events = poll.poll() | ||
| 270 | while pollc > 0 and len(events) > 0: | ||
| 271 | for rfd, event in events: | ||
| 272 | if event & select.POLLIN: | ||
| 273 | if rfd == proc.stdout.fileno(): | ||
| 274 | line = proc.stdout.readline() | ||
| 275 | if len(line) > 0: | ||
| 276 | proc_logger.info(line[:-1]) | ||
| 277 | if rfd == proc.stderr.fileno(): | ||
| 278 | line = proc.stderr.readline() | ||
| 279 | if len(line) > 0: | ||
| 280 | proc_logger.info(line[:-1]) | ||
| 281 | if event & select.POLLHUP: | ||
| 282 | poll.unregister(rfd) | ||
| 283 | pollc -= 1 | ||
| 284 | |||
| 285 | if pollc > 0: | ||
| 286 | events = poll.poll() | ||
| 287 | |||
| 288 | for handler in proc_logger.handlers: | ||
| 289 | handler.flush() | ||
| 290 | |||
| 291 | ret = proc.wait() | ||
| 292 | if ret != 0: | ||
| 293 | raise Exception(f'borg subprocess exited with returncode {ret}') | ||
| 294 | |||
| 295 | with Manager() as manager: | ||
| 296 | tmpdir_q = manager.Queue(1) | ||
| 297 | with closing(Process(target=do_create, args=(tmpdir_q,), name='do_create')) as p: | ||
| 298 | p.start() | ||
| 299 | |||
| 300 | with TemporaryDirectory(prefix=f'borg-mount_{basename}_', dir=os.getenv('RUNTIME_DIRECTORY')) as tmpdir: | ||
| 301 | tmpdir_q.put(tmpdir) | ||
| 302 | p.join() | ||
| 303 | if p.exitcode == 0 and dry_run: | ||
| 304 | return 125 | ||
| 305 | return p.exitcode | ||
| 145 | 306 | ||
| 146 | def sigterm(signum, frame): | 307 | def sigterm(signum, frame): |
| 147 | raise SystemExit(128 + signum) | 308 | raise SystemExit(128 + signum) |
| @@ -155,6 +316,32 @@ def main(): | |||
| 155 | console_handler.setFormatter( logging.Formatter('[%(levelname)s](%(name)s): %(message)s') ) | 316 | console_handler.setFormatter( logging.Formatter('[%(levelname)s](%(name)s): %(message)s') ) |
| 156 | if sys.stderr.isatty(): | 317 | if sys.stderr.isatty(): |
| 157 | console_handler.setFormatter( logging.Formatter('%(asctime)s [%(levelname)s](%(name)s): %(message)s') ) | 318 | console_handler.setFormatter( logging.Formatter('%(asctime)s [%(levelname)s](%(name)s): %(message)s') ) |
| 319 | |||
| 320 | burst_max = 10000 | ||
| 321 | burst = burst_max | ||
| 322 | last_use = None | ||
| 323 | inv_rate = 1e6 | ||
| 324 | def consume_filter(record): | ||
| 325 | nonlocal burst, burst_max, inv_rate, last_use | ||
| 326 | |||
| 327 | delay = None | ||
| 328 | while True: | ||
| 329 | now = time.monotonic_ns() | ||
| 330 | burst = min(burst_max, burst + math.floor((now - last_use) / inv_rate)) if last_use else burst_max | ||
| 331 | last_use = now | ||
| 332 | |||
| 333 | if burst > 0: | ||
| 334 | burst -= 1 | ||
| 335 | if delay: | ||
| 336 | delay = now - delay | ||
| 337 | |||
| 338 | return True | ||
| 339 | |||
| 340 | if delay is None: | ||
| 341 | delay = now | ||
| 342 | time.sleep(inv_rate / 1e9) | ||
| 343 | console_handler.addFilter(consume_filter) | ||
| 344 | |||
| 158 | logger.addHandler(console_handler) | 345 | logger.addHandler(console_handler) |
| 159 | 346 | ||
| 160 | # log uncaught exceptions | 347 | # log uncaught exceptions |
| @@ -167,7 +354,8 @@ def main(): | |||
| 167 | sys.excepthook = log_exceptions | 354 | sys.excepthook = log_exceptions |
| 168 | 355 | ||
| 169 | parser = argparse.ArgumentParser(prog='borgsnap') | 356 | parser = argparse.ArgumentParser(prog='borgsnap') |
| 170 | parser.add_argument('--verbose', '-v', action='count', default=0) | 357 | parser.add_argument('--verbose', '-v', dest='log_level', action='append_const', const=-1) |
| 358 | parser.add_argument('--quiet', '-q', dest='log_level', action='append_const', const=1) | ||
| 171 | parser.add_argument('--target', metavar='REPO', default='yggdrasil.borgbase:repo') | 359 | parser.add_argument('--target', metavar='REPO', default='yggdrasil.borgbase:repo') |
| 172 | parser.add_argument('--archive-prefix', metavar='REPO', default='yggdrasil.vidhar.') | 360 | parser.add_argument('--archive-prefix', metavar='REPO', default='yggdrasil.vidhar.') |
| 173 | subparsers = parser.add_subparsers() | 361 | subparsers = parser.add_subparsers() |
| @@ -183,12 +371,13 @@ def main(): | |||
| 183 | create_parser.set_defaults(cmd=create) | 371 | create_parser.set_defaults(cmd=create) |
| 184 | args = parser.parse_args() | 372 | args = parser.parse_args() |
| 185 | 373 | ||
| 186 | if args.verbose <= 0: | 374 | LOG_LEVELS = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL] |
| 187 | logger.setLevel(logging.WARNING) | 375 | DEFAULT_LOG_LEVEL = logging.ERROR |
| 188 | elif args.verbose <= 1: | 376 | log_level = LOG_LEVELS.index(DEFAULT_LOG_LEVEL) |
| 189 | logger.setLevel(logging.INFO) | 377 | |
| 190 | else: | 378 | for adjustment in args.log_level or (): |
| 191 | logger.setLevel(logging.DEBUG) | 379 | log_level = min(len(LOG_LEVELS) - 1, max(log_level + adjustment, 0)) |
| 380 | logger.setLevel(LOG_LEVELS[log_level]) | ||
| 192 | 381 | ||
| 193 | cmdArgs = {} | 382 | cmdArgs = {} |
| 194 | for copy in {'target', 'archive_prefix', 'snapshot', 'cache_file', 'dry_run'}: | 383 | for copy in {'target', 'archive_prefix', 'snapshot', 'cache_file', 'dry_run'}: |
diff --git a/hosts/vidhar/borg/default.nix b/hosts/vidhar/borg/default.nix index 79c75c4d..7e3129f2 100644 --- a/hosts/vidhar/borg/default.nix +++ b/hosts/vidhar/borg/default.nix | |||
| @@ -74,7 +74,7 @@ let | |||
| 74 | copy | 74 | copy |
| 75 | 75 | ||
| 76 | wrapProgram $out/bin/copy \ | 76 | wrapProgram $out/bin/copy \ |
| 77 | --prefix PATH : ${makeBinPath (with pkgs; [util-linux borgbackup])}:${config.security.wrapperDir} | 77 | --prefix PATH : ${makeBinPath (with pkgs; [config.boot.zfs.package util-linux borgbackup])}:${config.security.wrapperDir} |
| 78 | ''; | 78 | ''; |
| 79 | }); | 79 | }); |
| 80 | 80 | ||
| @@ -88,7 +88,6 @@ let | |||
| 88 | atomicwrites | 88 | atomicwrites |
| 89 | pyprctl | 89 | pyprctl |
| 90 | python-unshare | 90 | python-unshare |
| 91 | xdg | ||
| 92 | python-dateutil | 91 | python-dateutil |
| 93 | ''; | 92 | ''; |
| 94 | postInstall = '' | 93 | postInstall = '' |
| @@ -101,14 +100,13 @@ let | |||
| 101 | (self: super: { python-unshare = super.python-unshare.overrideAttrs (oldAttrs: { name = "python-unshare-0.2.1"; version = "0.2.1"; }); }) | 100 | (self: super: { python-unshare = super.python-unshare.overrideAttrs (oldAttrs: { name = "python-unshare-0.2.1"; version = "0.2.1"; }); }) |
| 102 | ]; | 101 | ]; |
| 103 | 102 | ||
| 104 | _.xdg.buildInputs.add = with pkgs."python3Packages"; [ poetry ]; | ||
| 105 | _.tomli.buildInputs.add = with pkgs."python3Packages"; [ flit-core ]; | 103 | _.tomli.buildInputs.add = with pkgs."python3Packages"; [ flit-core ]; |
| 106 | }; | 104 | }; |
| 107 | in { | 105 | in { |
| 108 | config = { | 106 | config = { |
| 109 | services.zfssnap.config.exec = { | 107 | services.zfssnap.config.exec = { |
| 110 | check = "${borgsnap}/bin/borgsnap -vvv --target yggdrasil.borgbase:repo --archive-prefix yggdrasil.vidhar. check --cache-file /run/zfssnap-prune/archives-cache.json"; | 108 | check = "${borgsnap}/bin/borgsnap -vvv --target yggdrasil.borgbase:repo --archive-prefix yggdrasil.vidhar. check --cache-file /run/zfssnap-prune/archives-cache.json"; |
| 111 | cmd = "${borgsnap}/bin/borgsnap -vvv --target yggdrasil.borgbase:repo --archive-prefix yggdrasil.vidhar. create --dry-run"; | 109 | cmd = "${borgsnap}/bin/borgsnap -vvv --target yggdrasil.borgbase:repo --archive-prefix yggdrasil.vidhar. create"; |
| 112 | 110 | ||
| 113 | halfweekly = "8"; | 111 | halfweekly = "8"; |
| 114 | monthly = "-1"; | 112 | monthly = "-1"; |
diff --git a/hosts/vidhar/network/bifrost/default.nix b/hosts/vidhar/network/bifrost/default.nix index 8c2cc1de..ec354f81 100644 --- a/hosts/vidhar/network/bifrost/default.nix +++ b/hosts/vidhar/network/bifrost/default.nix | |||
| @@ -40,18 +40,30 @@ in { | |||
| 40 | Destination = "2a03:4000:52:ada:4::/80"; | 40 | Destination = "2a03:4000:52:ada:4::/80"; |
| 41 | }; | 41 | }; |
| 42 | } | 42 | } |
| 43 | { routeConfig ={ | 43 | { routeConfig = { |
| 44 | Gateway = "2a03:4000:52:ada:4::"; | 44 | Gateway = "2a03:4000:52:ada:4::"; |
| 45 | GatewayOnLink = true; | 45 | GatewayOnLink = true; |
| 46 | Table = "bifrost"; | 46 | Table = "bifrost"; |
| 47 | }; | 47 | }; |
| 48 | } | 48 | } |
| 49 | { routeConfig = { | ||
| 50 | Destination = "2a03:4000:52:ada:4::/80"; | ||
| 51 | GatewayOnLink = true; | ||
| 52 | Table = "bifrost"; | ||
| 53 | }; | ||
| 54 | } | ||
| 55 | { routeConfig = { | ||
| 56 | Destination = "2a03:4000:52:ada:4:1::/96"; | ||
| 57 | GatewayOnLink = true; | ||
| 58 | Table = "bifrost"; | ||
| 59 | }; | ||
| 60 | } | ||
| 49 | ]; | 61 | ]; |
| 50 | routingPolicyRules = [ | 62 | routingPolicyRules = [ |
| 51 | { routingPolicyRuleConfig = { | 63 | { routingPolicyRuleConfig = { |
| 52 | Table = "bifrost"; | 64 | Table = "bifrost"; |
| 53 | From = "2a03:4000:52:ada:4:1::/96"; | 65 | From = "2a03:4000:52:ada:4:1::/96"; |
| 54 | Priority = 200; | 66 | Priority = 1; |
| 55 | }; | 67 | }; |
| 56 | } | 68 | } |
| 57 | ]; | 69 | ]; |
| @@ -64,6 +76,8 @@ in { | |||
| 64 | }; | 76 | }; |
| 65 | }; | 77 | }; |
| 66 | }; | 78 | }; |
| 79 | |||
| 80 | config.routeTables.bifrost = 1026; | ||
| 67 | }; | 81 | }; |
| 68 | systemd.services."systemd-networkd".serviceConfig.LoadCredential = [ | 82 | systemd.services."systemd-networkd".serviceConfig.LoadCredential = [ |
| 69 | "bifrost.priv:${config.sops.secrets.bifrost.path}" | 83 | "bifrost.priv:${config.sops.secrets.bifrost.path}" |
diff --git a/modules/yggdrasil-wg/default.nix b/modules/yggdrasil-wg/default.nix index c27eb286..8525cea0 100644 --- a/modules/yggdrasil-wg/default.nix +++ b/modules/yggdrasil-wg/default.nix | |||
| @@ -82,7 +82,7 @@ let | |||
| 82 | mkPrivateKeyPath = family: host: ./hosts + "/${family}" + "/${host}.priv"; | 82 | mkPrivateKeyPath = family: host: ./hosts + "/${family}" + "/${host}.priv"; |
| 83 | 83 | ||
| 84 | kernel = config.boot.kernelPackages; | 84 | kernel = config.boot.kernelPackages; |
| 85 | 85 | ||
| 86 | publicKeyPath = family: mkPublicKeyPath family hostName; | 86 | publicKeyPath = family: mkPublicKeyPath family hostName; |
| 87 | privateKeyPath = family: mkPrivateKeyPath family hostName; | 87 | privateKeyPath = family: mkPrivateKeyPath family hostName; |
| 88 | inNetwork' = family: pathExists (privateKeyPath family) && pathExists (publicKeyPath family); | 88 | inNetwork' = family: pathExists (privateKeyPath family) && pathExists (publicKeyPath family); |
| @@ -221,7 +221,7 @@ in { | |||
| 221 | }; | 221 | }; |
| 222 | } | 222 | } |
| 223 | ] ++ (concatMap (router: map (rAddr: { routeConfig = { Destination = "::/0"; Gateway = stripSubnet rAddr; GatewayOnLink = true; Table = "yggdrasil"; }; }) batHostIPs.${router}) (filter (router: router != hostName) routers)); | 223 | ] ++ (concatMap (router: map (rAddr: { routeConfig = { Destination = "::/0"; Gateway = stripSubnet rAddr; GatewayOnLink = true; Table = "yggdrasil"; }; }) batHostIPs.${router}) (filter (router: router != hostName) routers)); |
| 224 | routingPolicyRules = map (addr: { routingPolicyRuleConfig = { Table = "yggdrasil"; From = stripSubnet addr; Priority = 1; }; }) batHostIPs.${hostName}; | 224 | routingPolicyRules = map (addr: { routingPolicyRuleConfig = { Table = "yggdrasil"; From = addr; Priority = 1; }; }) batHostIPs.${hostName}; |
| 225 | linkConfig = { | 225 | linkConfig = { |
| 226 | MACAddress = "${batHostMACs.${hostName}}"; | 226 | MACAddress = "${batHostMACs.${hostName}}"; |
| 227 | RequiredForOnline = false; | 227 | RequiredForOnline = false; |
diff --git a/modules/zfssnap/default.nix b/modules/zfssnap/default.nix index 42cdf46f..735e73ec 100644 --- a/modules/zfssnap/default.nix +++ b/modules/zfssnap/default.nix | |||
| @@ -1,32 +1,20 @@ | |||
| 1 | { config, pkgs, lib, ... }: | 1 | { config, pkgs, lib, flakeInputs, ... }: |
| 2 | 2 | ||
| 3 | with lib; | 3 | with lib; |
| 4 | 4 | ||
| 5 | let | 5 | let |
| 6 | zfssnap = pkgs.stdenv.mkDerivation rec { | 6 | zfssnap = flakeInputs.mach-nix.lib.${config.nixpkgs.system}.buildPythonPackage rec { |
| 7 | name = "zfssnap"; | 7 | pname = "zfssnap"; |
| 8 | src = ./zfssnap.py; | 8 | src = ./zfssnap; |
| 9 | version = "0.0.0"; | ||
| 10 | ignoreDataOutdated = true; | ||
| 9 | 11 | ||
| 10 | phases = [ "buildPhase" "checkPhase" "installPhase" ]; | 12 | requirements = '' |
| 11 | 13 | pyxdg | |
| 12 | buildInputs = with pkgs; [makeWrapper]; | 14 | pytimeparse |
| 13 | 15 | python-dateutil | |
| 14 | python = pkgs.python39.withPackages (ps: with ps; [pyxdg pytimeparse python-dateutil]); | ||
| 15 | |||
| 16 | buildPhase = '' | ||
| 17 | substitute $src zfssnap \ | ||
| 18 | --subst-var-by python ${escapeShellArg python} | ||
| 19 | ''; | ||
| 20 | |||
| 21 | doCheck = true; | ||
| 22 | checkPhase = '' | ||
| 23 | ${python}/bin/python -m py_compile zfssnap | ||
| 24 | ''; | 16 | ''; |
| 25 | 17 | postInstall = '' | |
| 26 | installPhase = '' | ||
| 27 | install -m 0755 -D -t $out/bin \ | ||
| 28 | zfssnap | ||
| 29 | |||
| 30 | wrapProgram $out/bin/zfssnap \ | 18 | wrapProgram $out/bin/zfssnap \ |
| 31 | --prefix PATH : ${makeBinPath [config.boot.zfs.package]} | 19 | --prefix PATH : ${makeBinPath [config.boot.zfs.package]} |
| 32 | ''; | 20 | ''; |
| @@ -71,7 +59,9 @@ in { | |||
| 71 | before = [ "zfssnap-prune.service" ]; | 59 | before = [ "zfssnap-prune.service" ]; |
| 72 | serviceConfig = { | 60 | serviceConfig = { |
| 73 | Type = "oneshot"; | 61 | Type = "oneshot"; |
| 74 | ExecStart = "${zfssnap}/bin/zfssnap -v"; | 62 | ExecStart = "${zfssnap}/bin/zfssnap -vv"; |
| 63 | |||
| 64 | LogRateLimitIntervalSec = 0; | ||
| 75 | }; | 65 | }; |
| 76 | }; | 66 | }; |
| 77 | systemd.services."zfssnap-prune" = { | 67 | systemd.services."zfssnap-prune" = { |
| @@ -82,7 +72,9 @@ in { | |||
| 82 | ExecStart = let | 72 | ExecStart = let |
| 83 | mkSectionName = name: strings.escape [ "[" "]" ] (strings.toUpper name); | 73 | mkSectionName = name: strings.escape [ "[" "]" ] (strings.toUpper name); |
| 84 | zfssnapConfig = generators.toINI { inherit mkSectionName; } cfg.config; | 74 | zfssnapConfig = generators.toINI { inherit mkSectionName; } cfg.config; |
| 85 | in "${zfssnap}/bin/zfssnap -vv prune --config=${pkgs.writeText "zfssnap.ini" zfssnapConfig}"; | 75 | in "${zfssnap}/bin/zfssnap -vv prune --exec-newest --config=${pkgs.writeText "zfssnap.ini" zfssnapConfig}"; # DEBUG |
| 76 | |||
| 77 | LogRateLimitIntervalSec = 0; | ||
| 86 | }; | 78 | }; |
| 87 | }; | 79 | }; |
| 88 | 80 | ||
diff --git a/modules/zfssnap/zfssnap/setup.py b/modules/zfssnap/zfssnap/setup.py new file mode 100644 index 00000000..6c58757d --- /dev/null +++ b/modules/zfssnap/zfssnap/setup.py | |||
| @@ -0,0 +1,10 @@ | |||
| 1 | from setuptools import setup | ||
| 2 | |||
| 3 | setup(name='zfssnap', | ||
| 4 | packages=['zfssnap'], | ||
| 5 | entry_points={ | ||
| 6 | 'console_scripts': [ | ||
| 7 | 'zfssnap=zfssnap.__main__:main', | ||
| 8 | ], | ||
| 9 | } | ||
| 10 | ) | ||
diff --git a/modules/zfssnap/zfssnap.py b/modules/zfssnap/zfssnap/zfssnap/__main__.py index a8dae75f..a0eade78 100644 --- a/modules/zfssnap/zfssnap.py +++ b/modules/zfssnap/zfssnap/zfssnap/__main__.py | |||
| @@ -45,6 +45,9 @@ TIME_PATTERNS = OrderedDict([ | |||
| 45 | ("yearly", lambda t: t.strftime('%Y')), | 45 | ("yearly", lambda t: t.strftime('%Y')), |
| 46 | ]) | 46 | ]) |
| 47 | 47 | ||
| 48 | PROP_DO_AUTO_SNAPSHOT = 'li.yggdrasil:auto-snapshot' | ||
| 49 | PROP_IS_AUTO_SNAPSHOT = 'li.yggdrasil:is-auto-snapshot' | ||
| 50 | |||
| 48 | @dataclass(eq=True, order=True, frozen=True) | 51 | @dataclass(eq=True, order=True, frozen=True) |
| 49 | class Snap: | 52 | class Snap: |
| 50 | name: str | 53 | name: str |
| @@ -68,25 +71,28 @@ def _snap_name(item, time=_now()): | |||
| 68 | 71 | ||
| 69 | def _log_cmd(*args): | 72 | def _log_cmd(*args): |
| 70 | fmt_args = ' '.join(map(shlex.quote, args)) | 73 | fmt_args = ' '.join(map(shlex.quote, args)) |
| 71 | logger.debug(f'Running command: {fmt_args}') | 74 | logger.debug('Running command: %s', fmt_args) |
| 72 | 75 | ||
| 73 | def _get_items(): | 76 | def _get_items(): |
| 74 | items = {} | 77 | items = {} |
| 75 | 78 | ||
| 76 | args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', '-t', 'filesystem,volume', '-s', 'local,default,inherited,temporary,received', 'li.yggdrasil:auto-snapshot'] | 79 | args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', '-t', 'filesystem,volume', PROP_DO_AUTO_SNAPSHOT] |
| 77 | _log_cmd(*args) | 80 | _log_cmd(*args) |
| 78 | with subprocess.Popen(args, stdout=subprocess.PIPE) as proc: | 81 | with subprocess.Popen(args, stdout=subprocess.PIPE) as proc: |
| 79 | text_stdout = io.TextIOWrapper(proc.stdout) | 82 | text_stdout = io.TextIOWrapper(proc.stdout) |
| 80 | reader = csv.DictReader(text_stdout, fieldnames=['name', 'setting'], delimiter='\t', quoting=csv.QUOTE_NONE) | 83 | reader = csv.DictReader(text_stdout, fieldnames=['name', 'value'], delimiter='\t', quoting=csv.QUOTE_NONE) |
| 81 | Row = namedtuple('Row', reader.fieldnames) | 84 | Row = namedtuple('Row', reader.fieldnames) |
| 82 | for row in [Row(**data) for data in reader]: | 85 | for row in [Row(**data) for data in reader]: |
| 83 | items[row.name] = bool(strtobool(row.setting)) | 86 | if not row.value or row.value == '-': |
| 87 | continue | ||
| 88 | |||
| 89 | items[row.name] = bool(strtobool(row.value)) | ||
| 84 | 90 | ||
| 85 | return items | 91 | return items |
| 86 | 92 | ||
| 87 | def _get_snaps(only_auto=True): | 93 | def _get_snaps(only_auto=True): |
| 88 | snapshots = defaultdict(list) | 94 | snapshots = defaultdict(list) |
| 89 | args = ['zfs', 'list', '-H', '-p', '-t', 'snapshot', '-o', 'name,li.yggdrasil:is-auto-snapshot,creation'] | 95 | args = ['zfs', 'list', '-H', '-p', '-t', 'snapshot', '-o', f'name,{PROP_IS_AUTO_SNAPSHOT},creation'] |
| 90 | _log_cmd(*args) | 96 | _log_cmd(*args) |
| 91 | with subprocess.Popen(args, stdout=subprocess.PIPE) as proc: | 97 | with subprocess.Popen(args, stdout=subprocess.PIPE) as proc: |
| 92 | text_stdout = io.TextIOWrapper(proc.stdout) | 98 | text_stdout = io.TextIOWrapper(proc.stdout) |
| @@ -102,17 +108,26 @@ def _get_snaps(only_auto=True): | |||
| 102 | 108 | ||
| 103 | return snapshots | 109 | return snapshots |
| 104 | 110 | ||
| 105 | def prune(config, dry_run, keep_newest, do_exec): | 111 | def prune(config, dry_run, keep_newest, do_exec, exec_newest): |
| 106 | do_exec = do_exec and 'EXEC' in config | 112 | do_exec = do_exec and 'EXEC' in config |
| 107 | prune_timezone = config.gettimezone('KEEP', 'timezone', fallback=tzutc()) | 113 | prune_timezone = config.gettimezone('KEEP', 'timezone', fallback=tzutc()) |
| 108 | logger.debug(f'prune timezone: {prune_timezone}') | 114 | logger.debug('prune timezone: %s', prune_timezone) |
| 109 | 115 | ||
| 110 | items = _get_snaps() | 116 | items = _get_snaps() |
| 111 | 117 | ||
| 118 | kept_count = defaultdict(lambda: defaultdict(lambda: 0)) | ||
| 119 | kept_because = OrderedDict() | ||
| 120 | def keep_because(base, snap, rule, period=None): | ||
| 121 | nonlocal kept_count, kept_because | ||
| 122 | kept_count[rule][base] += 1 | ||
| 123 | if snap not in kept_because: | ||
| 124 | kept_because[snap] = deque() | ||
| 125 | kept_because[snap].append(KeptBecause(rule=rule, ix=kept_count[rule][base], base=base, period=period)) | ||
| 126 | |||
| 112 | exec_candidates = set() | 127 | exec_candidates = set() |
| 113 | if do_exec: | 128 | if do_exec: |
| 114 | exec_timezone = config.gettimezone('EXEC', 'timezone', fallback=prune_timezone) | 129 | exec_timezone = config.gettimezone('EXEC', 'timezone', fallback=prune_timezone) |
| 115 | logger.debug(f'exec timezone: {exec_timezone}') | 130 | logger.debug('exec timezone: %s', exec_timezone) |
| 116 | 131 | ||
| 117 | for rule, pattern in TIME_PATTERNS.items(): | 132 | for rule, pattern in TIME_PATTERNS.items(): |
| 118 | desired_count = config.getint('EXEC', rule, fallback=0) | 133 | desired_count = config.getint('EXEC', rule, fallback=0) |
| @@ -120,7 +135,7 @@ def prune(config, dry_run, keep_newest, do_exec): | |||
| 120 | for base, snaps in items.items(): | 135 | for base, snaps in items.items(): |
| 121 | periods = OrderedDict() | 136 | periods = OrderedDict() |
| 122 | 137 | ||
| 123 | for snap in sorted(snaps, key=lambda snap: snap.creation): | 138 | for snap in sorted(snaps, key=lambda snap: snap.creation, reverse=exec_newest): |
| 124 | period = pattern(snap.creation.astimezone(exec_timezone)) | 139 | period = pattern(snap.creation.astimezone(exec_timezone)) |
| 125 | if period not in periods: | 140 | if period not in periods: |
| 126 | periods[period] = deque() | 141 | periods[period] = deque() |
| @@ -134,15 +149,16 @@ def prune(config, dry_run, keep_newest, do_exec): | |||
| 134 | 149 | ||
| 135 | for snap in period_snaps: | 150 | for snap in period_snaps: |
| 136 | exec_candidates.add(snap) | 151 | exec_candidates.add(snap) |
| 137 | logger.debug(f'{snap.name} is exec candidate') | 152 | logger.debug('‘%s’ is exec candidate', snap.name) |
| 138 | to_exec -= 1 | 153 | to_exec -= 1 |
| 139 | break | 154 | break |
| 140 | 155 | ||
| 141 | if to_exec > 0: | 156 | if to_exec > 0: |
| 142 | logger.debug(f'Missing {to_exec} to fulfill exec {rule}={desired_count} for ‘{base}’') | 157 | logger.debug('Missing %d to fulfill exec %s=%d for ‘%s’', to_exec, rule, desired_count, base) |
| 143 | 158 | ||
| 144 | check_cmd = config.get('EXEC', 'check', fallback=None) | 159 | check_cmd = config.get('EXEC', 'check', fallback=None) |
| 145 | if check_cmd: | 160 | if check_cmd: |
| 161 | logger.debug('exec_candidates=%s', exec_candidates) | ||
| 146 | already_execed = set() | 162 | already_execed = set() |
| 147 | for snap in exec_candidates: | 163 | for snap in exec_candidates: |
| 148 | args = [] | 164 | args = [] |
| @@ -152,7 +168,20 @@ def prune(config, dry_run, keep_newest, do_exec): | |||
| 152 | check_res = subprocess.run(args) | 168 | check_res = subprocess.run(args) |
| 153 | if check_res.returncode == 0: | 169 | if check_res.returncode == 0: |
| 154 | already_execed.add(snap) | 170 | already_execed.add(snap) |
| 155 | logger.debug(f'{snap.name} already execed') | 171 | logger.debug('‘%s’ already execed', snap.name) |
| 172 | elif check_res.returncode == 124: | ||
| 173 | already_execed.add(snap) | ||
| 174 | logger.warning('‘%s’ ignored', snap.name) | ||
| 175 | pass | ||
| 176 | elif check_res.returncode == 125: | ||
| 177 | already_execed.add(snap) | ||
| 178 | logger.info('‘%s’ ignored but specified for keeping, doing so...', snap.name) | ||
| 179 | base_name, _, _ = snap.name.rpartition('@') | ||
| 180 | keep_because(base_name, snap.name, 'exec-ignored') | ||
| 181 | elif check_res.returncode == 126: | ||
| 182 | logger.debug('‘%s’ to exec', snap.name) | ||
| 183 | else: | ||
| 184 | check_res.check_returncode() | ||
| 156 | exec_candidates -= already_execed | 185 | exec_candidates -= already_execed |
| 157 | 186 | ||
| 158 | exec_cmd = config.get('EXEC', 'cmd', fallback=None) | 187 | exec_cmd = config.get('EXEC', 'cmd', fallback=None) |
| @@ -160,28 +189,26 @@ def prune(config, dry_run, keep_newest, do_exec): | |||
| 160 | if exec_cmd: | 189 | if exec_cmd: |
| 161 | execed = set() | 190 | execed = set() |
| 162 | for snap in sorted(exec_candidates, key=lambda snap: snap.creation): | 191 | for snap in sorted(exec_candidates, key=lambda snap: snap.creation): |
| 163 | if len(execed) >= exec_count: | 192 | if exec_count > 0 and len(execed) >= exec_count: |
| 164 | logger.debug(f'exc_count of {exec_count} reached') | 193 | logger.debug('exec_count of %d reached', exec_count) |
| 165 | break | 194 | break |
| 166 | 195 | ||
| 167 | args = [] | 196 | args = [] |
| 168 | args += shlex.split(exec_cmd) | 197 | args += shlex.split(exec_cmd) |
| 169 | args += [snap.name] | 198 | args += [snap.name] |
| 170 | _log_cmd(*args) | 199 | _log_cmd(*args) |
| 171 | subprocess.run(args).check_returncode() | 200 | p = subprocess.run(args) |
| 201 | if p.returncode == 125: | ||
| 202 | logger.warning('got dry-run returncode for ‘%s’, keeping...', snap.name) | ||
| 203 | base_name, _, _ = snap.name.rpartition('@') | ||
| 204 | keep_because(base_name, snap.name, 'exec-dryrun') | ||
| 205 | pass | ||
| 206 | else: | ||
| 207 | p.check_returncode() | ||
| 172 | execed.add(snap) | 208 | execed.add(snap) |
| 173 | 209 | ||
| 174 | exec_candidates -= execed | 210 | exec_candidates -= execed |
| 175 | 211 | ||
| 176 | kept_count = defaultdict(lambda: defaultdict(lambda: 0)) | ||
| 177 | kept_because = OrderedDict() | ||
| 178 | def keep_because(base, snap, rule, period=None): | ||
| 179 | nonlocal kept_count, kept_because | ||
| 180 | kept_count[rule][base] += 1 | ||
| 181 | if snap not in kept_because: | ||
| 182 | kept_because[snap] = deque() | ||
| 183 | kept_because[snap].append(KeptBecause(rule=rule, ix=kept_count[rule][base], base=base, period=period)) | ||
| 184 | |||
| 185 | for candidate in exec_candidates: | 212 | for candidate in exec_candidates: |
| 186 | base_name, _, _ = candidate.name.rpartition('@') | 213 | base_name, _, _ = candidate.name.rpartition('@') |
| 187 | keep_because(base_name, candidate.name, 'exec-candidate') | 214 | keep_because(base_name, candidate.name, 'exec-candidate') |
| @@ -191,10 +218,10 @@ def prune(config, dry_run, keep_newest, do_exec): | |||
| 191 | for base, snaps in items.items(): | 218 | for base, snaps in items.items(): |
| 192 | time_ref = max(snaps, key=lambda snap: snap.creation, default=None) | 219 | time_ref = max(snaps, key=lambda snap: snap.creation, default=None) |
| 193 | if not time_ref: | 220 | if not time_ref: |
| 194 | logger.warn(f'Nothing to keep for ‘{base}’') | 221 | logger.warning('Nothing to keep for ‘%s’', base) | ||
| 195 | continue | 222 | continue |
| 196 | 223 | ||
| 197 | logger.info(f'Using ‘{time_ref.name}’ as time reference for ‘{base}’') | 224 | logger.info('Using ‘%s’ as time reference for ‘%s’', time_ref.name, base) |
| 198 | within_cutoff = time_ref.creation - within | 225 | within_cutoff = time_ref.creation - within |
| 199 | 226 | ||
| 200 | for snap in snaps: | 227 | for snap in snaps: |
| @@ -227,11 +254,10 @@ def prune(config, dry_run, keep_newest, do_exec): | |||
| 227 | break | 254 | break |
| 228 | 255 | ||
| 229 | if to_keep > 0: | 256 | if to_keep > 0: |
| 230 | logger.debug(f'Missing {to_keep} to fulfill prune {rule}={desired_count} for ‘{base}’') | 257 | logger.debug('Missing %d to fulfill prune %s=%d for ‘%s’', to_keep, rule, desired_count, base) |
| 231 | 258 | ||
| 232 | for snap, reasons in kept_because.items(): | 259 | for snap, reasons in kept_because.items(): |
| 233 | reasons_str = ', '.join(map(str, reasons)) | 260 | logger.info('Keeping ‘%s’ because: %s', snap, ', '.join(map(str, reasons))) |
| 234 | logger.info(f'Keeping ‘{snap}’ because: {reasons_str}') | ||
| 235 | all_snaps = {snap.name for _, snaps in items.items() for snap in snaps} | 261 | all_snaps = {snap.name for _, snaps in items.items() for snap in snaps} |
| 236 | to_destroy = all_snaps - {*kept_because} | 262 | to_destroy = all_snaps - {*kept_because} |
| 237 | if not to_destroy: | 263 | if not to_destroy: |
| @@ -245,9 +271,9 @@ def prune(config, dry_run, keep_newest, do_exec): | |||
| 245 | _log_cmd(*args) | 271 | _log_cmd(*args) |
| 246 | subprocess.run(args, check=True) | 272 | subprocess.run(args, check=True) |
| 247 | if dry_run: | 273 | if dry_run: |
| 248 | logger.info(f'Would have pruned ‘{snap}’') | 274 | logger.info('Would have pruned ‘%s’', snap) |
| 249 | else: | 275 | else: |
| 250 | logger.info(f'Pruned ‘{snap}’') | 276 | logger.info('Pruned ‘%s’', snap) |
| 251 | 277 | ||
| 252 | def rename(snapshots, destroy=False, set_is_auto=False): | 278 | def rename(snapshots, destroy=False, set_is_auto=False): |
| 253 | args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', 'creation', *snapshots] | 279 | args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', 'creation', *snapshots] |
| @@ -262,29 +288,29 @@ def rename(snapshots, destroy=False, set_is_auto=False): | |||
| 262 | base_name, _, _ = row.name.rpartition('@') | 288 | base_name, _, _ = row.name.rpartition('@') |
| 263 | new_name = _snap_name(base_name, time=creation) | 289 | new_name = _snap_name(base_name, time=creation) |
| 264 | if new_name == row.name: | 290 | if new_name == row.name: |
| 265 | logger.debug(f'Not renaming ‘{row.name}’ since name is already correct') | 291 | logger.debug('Not renaming ‘%s’ since name is already correct', row.name) |
| 266 | continue | 292 | continue |
| 267 | 293 | ||
| 268 | if new_name in renamed_to: | 294 | if new_name in renamed_to: |
| 269 | if destroy: | 295 | if destroy: |
| 270 | logger.warning(f'Destroying ‘{row.name}’ since ‘{new_name}’ was already renamed to') | 296 | logger.warning('Destroying ‘%s’ since ‘%s’ was already renamed to', row.name, new_name) |
| 271 | args = ['zfs', 'destroy', row.name] | 297 | args = ['zfs', 'destroy', row.name] |
| 272 | _log_cmd(*args) | 298 | _log_cmd(*args) |
| 273 | subprocess.run(args, check=True) | 299 | subprocess.run(args, check=True) |
| 274 | else: | 300 | else: |
| 275 | logger.info(f'Skipping ‘{row.name}’ since ‘{new_name}’ was already renamed to') | 301 | logger.info('Skipping ‘%s’ since ‘%s’ was already renamed to', row.name, new_name) |
| 276 | 302 | ||
| 277 | continue | 303 | continue |
| 278 | 304 | ||
| 279 | logger.info(f'Renaming ‘{row.name}’ to ‘{new_name}’') | 305 | logger.info('Renaming ‘%s’ to ‘%s’', row.name, new_name) |
| 280 | args = ['zfs', 'rename', row.name, new_name] | 306 | args = ['zfs', 'rename', row.name, new_name] |
| 281 | _log_cmd(*args) | 307 | _log_cmd(*args) |
| 282 | subprocess.run(args, check=True) | 308 | subprocess.run(args, check=True) |
| 283 | renamed_to.add(new_name) | 309 | renamed_to.add(new_name) |
| 284 | 310 | ||
| 285 | if set_is_auto: | 311 | if set_is_auto: |
| 286 | logger.info(f'Setting is-auto-snapshot on ‘{new_name}’') | 312 | logger.info('Setting is-auto-snapshot on ‘%s’', new_name) |
| 287 | args = ['zfs', 'set', 'li.yggdrasil:is-auto-snapshot=true', new_name] | 313 | args = ['zfs', 'set', f'{PROP_IS_AUTO_SNAPSHOT}=true', new_name] |
| 288 | _log_cmd(*args) | 314 | _log_cmd(*args) |
| 289 | subprocess.run(args, check=True) | 315 | subprocess.run(args, check=True) |
| 290 | 316 | ||
| @@ -301,7 +327,7 @@ def autosnap(): | |||
| 301 | else: | 327 | else: |
| 302 | all_snap_names |= snap_names | 328 | all_snap_names |= snap_names |
| 303 | 329 | ||
| 304 | args = ['zfs', 'snapshot', '-o', 'li.yggdrasil:is-auto-snapshot=true'] | 330 | args = ['zfs', 'snapshot', '-o', f'{PROP_IS_AUTO_SNAPSHOT}=true'] |
| 305 | if recursive: | 331 | if recursive: |
| 306 | args += ['-r'] | 332 | args += ['-r'] |
| 307 | args += snap_names | 333 | args += snap_names |
| @@ -324,7 +350,7 @@ def autosnap(): | |||
| 324 | await asyncio.gather(*tasks) | 350 | await asyncio.gather(*tasks) |
| 325 | asyncio.run(run_tasks()) | 351 | asyncio.run(run_tasks()) |
| 326 | for snap in all_snap_names: | 352 | for snap in all_snap_names: |
| 327 | logger.info(f'Created ‘{snap}’') | 353 | logger.info('Created ‘%s’', snap) |
| 328 | if all_snap_names: | 354 | if all_snap_names: |
| 329 | rename(snapshots=all_snap_names) | 355 | rename(snapshots=all_snap_names) |
| 330 | 356 | ||
| @@ -347,7 +373,8 @@ def main(): | |||
| 347 | sys.excepthook = log_exceptions | 373 | sys.excepthook = log_exceptions |
| 348 | 374 | ||
| 349 | parser = argparse.ArgumentParser(prog='zfssnap') | 375 | parser = argparse.ArgumentParser(prog='zfssnap') |
| 350 | parser.add_argument('--verbose', '-v', action='count', default=0) | 376 | parser.add_argument('--verbose', '-v', dest='log_level', action='append_const', const=-1) |
| 377 | parser.add_argument('--quiet', '-q', dest='log_level', action='append_const', const=1) | ||
| 351 | subparsers = parser.add_subparsers() | 378 | subparsers = parser.add_subparsers() |
| 352 | parser.set_defaults(cmd=autosnap) | 379 | parser.set_defaults(cmd=autosnap) |
| 353 | rename_parser = subparsers.add_parser('rename') | 380 | rename_parser = subparsers.add_parser('rename') |
| @@ -359,26 +386,29 @@ def main(): | |||
| 359 | prune_parser.add_argument('--config', '-c', dest='config_files', nargs='*', default=list()) | 386 | prune_parser.add_argument('--config', '-c', dest='config_files', nargs='*', default=list()) |
| 360 | prune_parser.add_argument('--dry-run', '-n', action='store_true', default=False) | 387 | prune_parser.add_argument('--dry-run', '-n', action='store_true', default=False) |
| 361 | prune_parser.add_argument('--keep-newest', action='store_true', default=False) | 388 | prune_parser.add_argument('--keep-newest', action='store_true', default=False) |
| 389 | prune_parser.add_argument('--exec-newest', action='store_true', default=False) | ||
| 362 | prune_parser.add_argument('--no-exec', dest='do_exec', action='store_false', default=True) | 390 | prune_parser.add_argument('--no-exec', dest='do_exec', action='store_false', default=True) |
| 363 | prune_parser.set_defaults(cmd=prune) | 391 | prune_parser.set_defaults(cmd=prune) |
| 364 | args = parser.parse_args() | 392 | args = parser.parse_args() |
| 365 | 393 | ||
| 366 | if args.verbose <= 0: | 394 | |
| 367 | logger.setLevel(logging.WARNING) | 395 | LOG_LEVELS = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL] |
| 368 | elif args.verbose <= 1: | 396 | DEFAULT_LOG_LEVEL = logging.ERROR |
| 369 | logger.setLevel(logging.INFO) | 397 | log_level = LOG_LEVELS.index(DEFAULT_LOG_LEVEL) |
| 370 | else: | 398 | |
| 371 | logger.setLevel(logging.DEBUG) | 399 | for adjustment in args.log_level or (): |
| 400 | log_level = min(len(LOG_LEVELS) - 1, max(log_level + adjustment, 0)) | ||
| 401 | logger.setLevel(LOG_LEVELS[log_level]) | ||
| 372 | 402 | ||
| 373 | cmdArgs = {} | 403 | cmdArgs = {} |
| 374 | for copy in {'snapshots', 'dry_run', 'destroy', 'keep_newest', 'set_is_auto', 'do_exec'}: | 404 | for copy in {'snapshots', 'dry_run', 'destroy', 'keep_newest', 'exec_newest', 'set_is_auto', 'do_exec'}: |
| 375 | if copy in vars(args): | 405 | if copy in vars(args): |
| 376 | cmdArgs[copy] = vars(args)[copy] | 406 | cmdArgs[copy] = vars(args)[copy] |
| 377 | if 'config_files' in vars(args): | 407 | if 'config_files' in vars(args): |
| 378 | def convert_timedelta(secs_str): | 408 | def convert_timedelta(secs_str): |
| 379 | secs=pytimeparse.parse(secs_str) | 409 | secs=pytimeparse.parse(secs_str) |
| 380 | if secs is None: | 410 | if secs is None: |
| 381 | raise ValueError(f'Could not parse timedelta expression ‘{secs_str}’') | 411 | raise ValueError(f'Could not parse timedelta expression ‘{secs_str}’') | ||
| 382 | return timedelta(seconds=secs) | 412 | return timedelta(seconds=secs) |
| 383 | config = configparser.ConfigParser(converters={ | 413 | config = configparser.ConfigParser(converters={ |
| 384 | 'timedelta': convert_timedelta, | 414 | 'timedelta': convert_timedelta, |
| @@ -393,9 +423,9 @@ def main(): | |||
| 393 | return ', '.join(map(lambda file: f'‘{file}’', files)) | 423 | return ', '.join(map(lambda file: f'‘{file}’', files)) |
| 394 | 424 | ||
| 395 | if not read_files: | 425 | if not read_files: |
| 396 | raise Exception(f'Found no config files. Tried: {format_config_files(search_files)}') | 426 | raise Exception(f'Found no config files. Tried: {format_config_files(search_files)}') | ||
| 397 | 427 | ||
| 398 | logger.debug(f'Read following config files: {format_config_files(read_files)}') | 428 | logger.debug('Read following config files: %s', format_config_files(read_files)) |
| 399 | 429 | ||
| 400 | cmdArgs['config'] = config | 430 | cmdArgs['config'] = config |
| 401 | 431 | ||
