Diffstat (limited to 'modules/zfssnap/zfssnap.py')
-rw-r--r--  modules/zfssnap/zfssnap.py  405
1 file changed, 0 insertions(+), 405 deletions(-)
diff --git a/modules/zfssnap/zfssnap.py b/modules/zfssnap/zfssnap.py
deleted file mode 100644
index a8dae75f..00000000
--- a/modules/zfssnap/zfssnap.py
+++ /dev/null
@@ -1,405 +0,0 @@
#!@python@/bin/python

import csv
import subprocess
import io
from distutils.util import strtobool
from datetime import datetime, timezone, timedelta
from dateutil.tz import gettz, tzutc
import pytimeparse
import argparse
import re

import sys

import logging

import shlex

from collections import defaultdict, OrderedDict, deque, namedtuple

import configparser
from xdg import BaseDirectory

from functools import cache

from math import floor

import asyncio

from dataclasses import dataclass

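# Retention granularities: each rule maps a timestamp to the period it falls
# into; the keep/exec loops below retain at most one snapshot per period, up
# to the configured count per rule.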
TIME_PATTERNS = OrderedDict([
    ("secondly", lambda t: t.strftime('%Y-%m-%d %H:%M:%S')),
    ("minutely", lambda t: t.strftime('%Y-%m-%d %H:%M')),
    ("5m", lambda t: (t.strftime('%Y-%m-%d %H'), floor(t.minute / 5) * 5)),
    ("15m", lambda t: (t.strftime('%Y-%m-%d %H'), floor(t.minute / 15) * 15)),
    ("hourly", lambda t: t.strftime('%Y-%m-%d %H')),
    ("4h", lambda t: (t.strftime('%Y-%m-%d'), floor(t.hour / 4) * 4)),
    ("12h", lambda t: (t.strftime('%Y-%m-%d'), floor(t.hour / 12) * 12)),
    ("daily", lambda t: t.strftime('%Y-%m-%d')),
    ("halfweekly", lambda t: (t.strftime('%G-%V'), floor(int(t.strftime('%u')) / 4) * 4)),
    ("weekly", lambda t: t.strftime('%G-%V')),
    ("monthly", lambda t: t.strftime('%Y-%m')),
    ("yearly", lambda t: t.strftime('%Y')),
])

@dataclass(eq=True, order=True, frozen=True)
class Snap:
    name: str
    creation: datetime

@dataclass(eq=True, order=True, frozen=True)
class KeptBecause:
    rule: str
    ix: int
    base: str
    period: str

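# _now() is cached, so every caller within one run sees the same timestamp;
# in particular the default for _snap_name's ‘time’ parameter (evaluated once
# at definition time) matches it.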
@cache
def _now():
    return datetime.now(timezone.utc)

def _snap_name(item, time=_now()):
    suffix = re.sub(r'\+00:00$', r'Z', time.isoformat(timespec='seconds'))
    return f'{item}@{suffix}'

def _log_cmd(*args):
    fmt_args = ' '.join(map(shlex.quote, args))
    logger.debug(f'Running command: {fmt_args}')

def _get_items():
    items = {}

    args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', '-t', 'filesystem,volume', '-s', 'local,default,inherited,temporary,received', 'li.yggdrasil:auto-snapshot']
    _log_cmd(*args)
    with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
        text_stdout = io.TextIOWrapper(proc.stdout)
        reader = csv.DictReader(text_stdout, fieldnames=['name', 'setting'], delimiter='\t', quoting=csv.QUOTE_NONE)
        Row = namedtuple('Row', reader.fieldnames)
        for row in [Row(**data) for data in reader]:
            items[row.name] = bool(strtobool(row.setting))

    return items

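# Returns a mapping of dataset name → [Snap], by default restricted to
# snapshots tagged li.yggdrasil:is-auto-snapshot=true.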
def _get_snaps(only_auto=True):
    snapshots = defaultdict(list)
    args = ['zfs', 'list', '-H', '-p', '-t', 'snapshot', '-o', 'name,li.yggdrasil:is-auto-snapshot,creation']
    _log_cmd(*args)
    with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
        text_stdout = io.TextIOWrapper(proc.stdout)
        reader = csv.DictReader(text_stdout, fieldnames=['name', 'is_auto_snapshot', 'timestamp'], delimiter='\t', quoting=csv.QUOTE_NONE)
        Row = namedtuple('Row', reader.fieldnames)
        for row in [Row(**data) for data in reader]:
            if only_auto and not bool(strtobool(row.is_auto_snapshot)):
                continue

            base_name, _, _ = row.name.rpartition('@')
            creation = datetime.fromtimestamp(int(row.timestamp), timezone.utc)
            snapshots[base_name].append(Snap(name=row.name, creation=creation))

    return snapshots

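# Pruning is driven by an INI config (zfssnap.ini, read in main()). An
# illustrative example; the values here are made up, not shipped with this
# module:
#
#   [KEEP]
#   within = 1d
#   daily = 7
#   weekly = 4
#   timezone = Europe/Berlin
#
#   [EXEC]
#   daily = 1
#   check = <command exiting 0 if the given snapshot was already handled>
#   cmd = <command invoked with the snapshot name as its last argument>
#   count = 1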
def prune(config, dry_run, keep_newest, do_exec):
    do_exec = do_exec and 'EXEC' in config
    prune_timezone = config.gettimezone('KEEP', 'timezone', fallback=tzutc())
    logger.debug(f'prune timezone: {prune_timezone}')

    items = _get_snaps()

    exec_candidates = set()
    if do_exec:
        exec_timezone = config.gettimezone('EXEC', 'timezone', fallback=prune_timezone)
        logger.debug(f'exec timezone: {exec_timezone}')

        for rule, pattern in TIME_PATTERNS.items():
            desired_count = config.getint('EXEC', rule, fallback=0)

            for base, snaps in items.items():
                periods = OrderedDict()

                for snap in sorted(snaps, key=lambda snap: snap.creation):
                    period = pattern(snap.creation.astimezone(exec_timezone))
                    if period not in periods:
                        periods[period] = deque()
                    periods[period].append(snap)

                to_exec = desired_count
                ordered_periods = periods.items()
                for period, period_snaps in ordered_periods:
                    if to_exec == 0:
                        break

                    for snap in period_snaps:
                        exec_candidates.add(snap)
                        logger.debug(f'{snap.name} is exec candidate')
                        to_exec -= 1
                        break

                if to_exec > 0:
                    logger.debug(f'Missing {to_exec} to fulfill exec {rule}={desired_count} for ‘{base}’')

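    # Drop candidates that the configured check command reports (exit status
    # 0) as already handled.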
    check_cmd = config.get('EXEC', 'check', fallback=None)
    if check_cmd:
        already_execed = set()
        for snap in exec_candidates:
            args = []
            args += shlex.split(check_cmd)
            args += [snap.name]
            _log_cmd(*args)
            check_res = subprocess.run(args)
            if check_res.returncode == 0:
                already_execed.add(snap)
                logger.debug(f'{snap.name} already execed')
        exec_candidates -= already_execed

    exec_cmd = config.get('EXEC', 'cmd', fallback=None)
    exec_count = config.getint('EXEC', 'count', fallback=1)
    if exec_cmd:
        execed = set()
        for snap in sorted(exec_candidates, key=lambda snap: snap.creation):
            if len(execed) >= exec_count:
                logger.debug(f'exec_count of {exec_count} reached')
                break

            args = []
            args += shlex.split(exec_cmd)
            args += [snap.name]
            _log_cmd(*args)
            subprocess.run(args).check_returncode()
            execed.add(snap)

        exec_candidates -= execed

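    # Bookkeeping: record every rule that claims a snapshot; only snapshots
    # claimed by no rule at all are destroyed at the end.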
    kept_count = defaultdict(lambda: defaultdict(lambda: 0))
    kept_because = OrderedDict()
    def keep_because(base, snap, rule, period=None):
        nonlocal kept_count, kept_because
        kept_count[rule][base] += 1
        if snap not in kept_because:
            kept_because[snap] = deque()
        kept_because[snap].append(KeptBecause(rule=rule, ix=kept_count[rule][base], base=base, period=period))

    for candidate in exec_candidates:
        base_name, _, _ = candidate.name.rpartition('@')
        keep_because(base_name, candidate.name, 'exec-candidate')

    within = config.gettimedelta('KEEP', 'within')
    if within > timedelta(seconds=0):
        for base, snaps in items.items():
            time_ref = max(snaps, key=lambda snap: snap.creation, default=None)
            if not time_ref:
                logger.warning(f'Nothing to keep for ‘{base}’')
                continue

            logger.info(f'Using ‘{time_ref.name}’ as time reference for ‘{base}’')
            within_cutoff = time_ref.creation - within

            for snap in snaps:
                if snap.creation >= within_cutoff:
                    keep_because(base, snap.name, 'within')
    else:
        logger.warning('Skipping rule ‘within’ since retention period is zero')

    for rule, pattern in TIME_PATTERNS.items():
        desired_count = config.getint('KEEP', rule, fallback=0)

        for base, snaps in items.items():
            periods = OrderedDict()

            for snap in sorted(snaps, key=lambda snap: snap.creation, reverse=keep_newest):
                period = pattern(snap.creation.astimezone(prune_timezone))
                if period not in periods:
                    periods[period] = deque()
                periods[period].append(snap)

            to_keep = desired_count
            ordered_periods = periods.items() if keep_newest else reversed(periods.items())
            for period, period_snaps in ordered_periods:
                if to_keep == 0:
                    break

                for snap in period_snaps:
                    keep_because(base, snap.name, rule, period=period)
                    to_keep -= 1
                    break

            if to_keep > 0:
                logger.debug(f'Missing {to_keep} to fulfill prune {rule}={desired_count} for ‘{base}’')

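    # Everything recorded above is kept; all remaining auto-snapshots are
    # destroyed (with ‘zfs destroy -n’ when --dry-run is given).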
    for snap, reasons in kept_because.items():
        reasons_str = ', '.join(map(str, reasons))
        logger.info(f'Keeping ‘{snap}’ because: {reasons_str}')
    all_snaps = {snap.name for _, snaps in items.items() for snap in snaps}
    to_destroy = all_snaps - {*kept_because}
    if not to_destroy:
        logger.info('Nothing to prune')

    for snap in sorted(to_destroy):
        args = ['zfs', 'destroy']
        if dry_run:
            args += ['-n']
        args += [snap]
        _log_cmd(*args)
        subprocess.run(args, check=True)
        if dry_run:
            logger.info(f'Would have pruned ‘{snap}’')
        else:
            logger.info(f'Pruned ‘{snap}’')

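# rename() normalizes snapshot names to ‘<dataset>@<ISO 8601 creation time>’
# (UTC, ‘Z’ suffix), destroying or skipping snapshots whose normalized name
# was already taken by an earlier rename.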
def rename(snapshots, destroy=False, set_is_auto=False):
    args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', 'creation', *snapshots]
    _log_cmd(*args)
    renamed_to = set()
    with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
        text_stdout = io.TextIOWrapper(proc.stdout)
        reader = csv.DictReader(text_stdout, fieldnames=['name', 'timestamp'], delimiter='\t', quoting=csv.QUOTE_NONE)
        Row = namedtuple('Row', reader.fieldnames)
        for row in [Row(**data) for data in reader]:
            creation = datetime.fromtimestamp(int(row.timestamp), timezone.utc)
            base_name, _, _ = row.name.rpartition('@')
            new_name = _snap_name(base_name, time=creation)
            if new_name == row.name:
                logger.debug(f'Not renaming ‘{row.name}’ since name is already correct')
                continue

            if new_name in renamed_to:
                if destroy:
                    logger.warning(f'Destroying ‘{row.name}’ since another snapshot was already renamed to ‘{new_name}’')
                    args = ['zfs', 'destroy', row.name]
                    _log_cmd(*args)
                    subprocess.run(args, check=True)
                else:
                    logger.info(f'Skipping ‘{row.name}’ since another snapshot was already renamed to ‘{new_name}’')

                continue

            logger.info(f'Renaming ‘{row.name}’ to ‘{new_name}’')
            args = ['zfs', 'rename', row.name, new_name]
            _log_cmd(*args)
            subprocess.run(args, check=True)
            renamed_to.add(new_name)

            if set_is_auto:
                logger.info(f'Setting is-auto-snapshot on ‘{new_name}’')
                args = ['zfs', 'set', 'li.yggdrasil:is-auto-snapshot=true', new_name]
                _log_cmd(*args)
                subprocess.run(args, check=True)

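# autosnap: snapshot every dataset with li.yggdrasil:auto-snapshot=true,
# batched into one ‘zfs snapshot’ invocation per pool, then normalize the new
# snapshot names via rename().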
def autosnap():
    items = _get_items()

    all_snap_names = set()
    async def do_snapshot(*snap_items, recursive=False):
        nonlocal items, all_snap_names
        snap_names = {_snap_name(item) for item in snap_items if items[item]}
        if not snap_names:
            # nothing enabled in this batch; avoid calling ‘zfs snapshot’
            # with no snapshot arguments
            return
        if recursive:
            for snap_item in snap_items:
                all_snap_names |= {_snap_name(item) for item in items if item.startswith(snap_item)}
        else:
            all_snap_names |= snap_names

        args = ['zfs', 'snapshot', '-o', 'li.yggdrasil:is-auto-snapshot=true']
        if recursive:
            args += ['-r']
        args += snap_names

        _log_cmd(*args)
        subprocess.run(args, check=True)

    pool_items = defaultdict(set)
    for item in items:
        pool, _, _ = item.partition('/')
        pool_items[pool].add(item)

    tasks = []
    for snap_items in pool_items.values():
        tasks.append(do_snapshot(*snap_items))
    if not tasks:
        logger.warning('No snapshots to create')
    else:
        async def run_tasks():
            await asyncio.gather(*tasks)
        asyncio.run(run_tasks())

    for snap in all_snap_names:
        logger.info(f'Created ‘{snap}’')
    if all_snap_names:
        rename(snapshots=all_snap_names)

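# CLI entry point: bare ‘zfssnap’ runs autosnap; the ‘rename’ and ‘prune’
# subcommands dispatch to the functions above.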
def main():
    global logger
    logger = logging.getLogger(__name__)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter('[%(levelname)s](%(name)s): %(message)s'))
    if sys.stderr.isatty():
        console_handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s](%(name)s): %(message)s'))
    logger.addHandler(console_handler)

    # log uncaught exceptions
    def log_exceptions(type, value, tb):
        global logger

        logger.error(value)
        sys.__excepthook__(type, value, tb)  # calls default excepthook

    sys.excepthook = log_exceptions

    parser = argparse.ArgumentParser(prog='zfssnap')
    parser.add_argument('--verbose', '-v', action='count', default=0)
    subparsers = parser.add_subparsers()
    parser.set_defaults(cmd=autosnap)
    rename_parser = subparsers.add_parser('rename')
    rename_parser.add_argument('snapshots', nargs='+')
    rename_parser.add_argument('--destroy', action='store_true', default=False)
    rename_parser.add_argument('--set-is-auto', action='store_true', default=False)
    rename_parser.set_defaults(cmd=rename)
    prune_parser = subparsers.add_parser('prune')
    prune_parser.add_argument('--config', '-c', dest='config_files', nargs='*', default=list())
    prune_parser.add_argument('--dry-run', '-n', action='store_true', default=False)
    prune_parser.add_argument('--keep-newest', action='store_true', default=False)
    prune_parser.add_argument('--no-exec', dest='do_exec', action='store_false', default=True)
    prune_parser.set_defaults(cmd=prune)
    args = parser.parse_args()

    if args.verbose <= 0:
        logger.setLevel(logging.WARNING)
    elif args.verbose <= 1:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.DEBUG)

    cmd_args = {}
    for copy in {'snapshots', 'dry_run', 'destroy', 'keep_newest', 'set_is_auto', 'do_exec'}:
        if copy in vars(args):
            cmd_args[copy] = vars(args)[copy]
    if 'config_files' in vars(args):
        def convert_timedelta(secs_str):
            secs = pytimeparse.parse(secs_str)
            if secs is None:
                raise ValueError(f'Could not parse timedelta expression ‘{secs_str}’')
            return timedelta(seconds=secs)
        config = configparser.ConfigParser(converters={
            'timedelta': convert_timedelta,
            'timezone': gettz
        })
        search_files = args.config_files if args.config_files else [*BaseDirectory.load_config_paths('zfssnap.ini')]
        read_files = config.read(search_files)

        def format_config_files(files):
            if not files:
                return 'no files'
            return ', '.join(map(lambda file: f'‘{file}’', files))

        if not read_files:
            raise Exception(f'Found no config files. Tried: {format_config_files(search_files)}')

        logger.debug(f'Read following config files: {format_config_files(read_files)}')

        cmd_args['config'] = config

    args.cmd(**cmd_args)

if __name__ == '__main__':
    sys.exit(main())