From b931543508377c0e48a6801e4ea217eb523e2b03 Mon Sep 17 00:00:00 2001 From: Gregor Kleen Date: Tue, 13 Sep 2022 10:29:35 +0200 Subject: ... --- _sources/generated.json | 58 ++++-- _sources/generated.nix | 46 +++-- accounts/gkleen@sif/default.nix | 4 +- .../ccert_policy_server/__init__.py | 0 .../ccert_policy_server/__main__.py | 92 ++++++++++ hosts/surtr/email/ccert-policy-server/setup.py | 12 ++ hosts/surtr/email/default.nix | 88 ++++++++- hosts/surtr/postgresql.nix | 11 +- hosts/vidhar/borg/borgsnap/borgsnap/__main__.py | 202 +++++++++++++++++++++ hosts/vidhar/borg/borgsnap/setup.py | 10 + hosts/vidhar/borg/copy.py | 4 +- hosts/vidhar/borg/default.nix | 92 ++++++++-- hosts/vidhar/default.nix | 39 +++- hosts/vidhar/prometheus/default.nix | 50 ++--- hosts/vidhar/zfs.nix | 16 +- modules/postfwd.nix | 65 +++++++ modules/zfssnap/default.nix | 6 +- modules/zfssnap/zfssnap.py | 197 ++++++++++++++------ nvfetcher.toml | 12 +- overlays/postfwd.nix | 32 ++++ overlays/smartprom/default.nix | 19 ++ overlays/smartprom/setup.py | 11 ++ overlays/worktime/worktime.py | 58 +++--- user-profiles/utils.nix | 1 + 24 files changed, 951 insertions(+), 174 deletions(-) create mode 100644 hosts/surtr/email/ccert-policy-server/ccert_policy_server/__init__.py create mode 100644 hosts/surtr/email/ccert-policy-server/ccert_policy_server/__main__.py create mode 100644 hosts/surtr/email/ccert-policy-server/setup.py create mode 100644 hosts/vidhar/borg/borgsnap/borgsnap/__main__.py create mode 100644 hosts/vidhar/borg/borgsnap/setup.py create mode 100644 modules/postfwd.nix create mode 100644 overlays/postfwd.nix create mode 100644 overlays/smartprom/default.nix create mode 100644 overlays/smartprom/setup.py diff --git a/_sources/generated.json b/_sources/generated.json index 78285ff5..dcbde8b1 100644 --- a/_sources/generated.json +++ b/_sources/generated.json @@ -64,11 +64,11 @@ "pinned": false, "src": { "name": null, - "sha256": "sha256-eOq2PYvLB6ueIjC8Rif/p7HJKW8AkbEjf1La9/HUaC8=", + "sha256": "sha256-9Gx7Cwb5UWE1NaSA0iun6FY/TwjT0/jjcAR98SLLFjc=", "type": "url", - "url": "https://github.com/wofr06/lesspipe/archive/refs/tags/v2.05.tar.gz" + "url": "https://github.com/wofr06/lesspipe/archive/refs/tags/v2.06.tar.gz" }, - "version": "2.05" + "version": "2.06" }, "mpv-autosave": { "cargoLocks": null, @@ -101,11 +101,11 @@ "name": null, "owner": "po5", "repo": "chapterskip", - "rev": "f4c5da3e7661212eb491cc1d85beafbf951e32f0", - "sha256": "sha256-ZK64WdhXMubzfkKFVg7rX5dPc4IVHDwN0T1X9tXwsJI=", + "rev": "b26825316e3329882206ae78dc903ebc4613f039", + "sha256": "sha256-OTrLQE3rYvPQamEX23D6HttNjx3vafWdTMxTiWpDy90=", "type": "github" }, - "version": "f4c5da3e7661212eb491cc1d85beafbf951e32f0" + "version": "b26825316e3329882206ae78dc903ebc4613f039" }, "mpv-createchapter": { "cargoLocks": null, @@ -139,11 +139,11 @@ "name": null, "owner": "hoyon", "repo": "mpv-mpris", - "rev": "0.8.1", - "sha256": "sha256-ugEiQZA1vQCVwyv3ViM84Qz8lhRvy17vcxjayYevTAs=", + "rev": "0.9", + "sha256": "sha256-leW7oCWTnJuprVnJJ+iXd3nuB2VXl3fw8FmPxv7d6rA=", "type": "github" }, - "version": "0.8.1" + "version": "0.9" }, "mpv-reload": { "cargoLocks": null, @@ -172,11 +172,25 @@ "pinned": false, "src": { "name": null, - "sha256": "sha256-snvUmKZVckDNt2nnFOEa4cbGLtm825UgvA3cBpoNGLw=", + "sha256": "sha256-3vB6krsP6G25bviG27QI+9NyJN2YKOOmM5KhKUclJPc=", "type": "url", - "url": "https://github.com/Snawoot/postfix-mta-sts-resolver/archive/refs/tags/v1.1.3.tar.gz" + "url": "https://github.com/Snawoot/postfix-mta-sts-resolver/archive/refs/tags/v1.1.4.tar.gz" }, - 
"version": "1.1.3" + "version": "1.1.4" + }, + "postfwd": { + "cargoLocks": null, + "extract": null, + "name": "postfwd", + "passthru": null, + "pinned": false, + "src": { + "name": null, + "sha256": "sha256-mMKXzeqg2PfXkvGL7qugOelm/I2fZnUidq6/ugXDHa0=", + "type": "url", + "url": "https://github.com/postfwd/postfwd/archive/refs/tags/v2.03.tar.gz" + }, + "version": "2.03" }, "psql-versioning": { "cargoLocks": null, @@ -196,6 +210,20 @@ }, "version": "3e578ff5e5aa6c7e5459dbfa842a64a1b2674b2e" }, + "smartprom": { + "cargoLocks": null, + "extract": null, + "name": "smartprom", + "passthru": null, + "pinned": false, + "src": { + "name": null, + "sha256": "sha256-VbpFvDBygJswUfmufVjo/xXxDDmXLq/0D9ln8u+139E=", + "type": "url", + "url": "https://github.com/matusnovak/prometheus-smartctl/archive/refs/tags/v2.1.0.tar.gz" + }, + "version": "2.1.0" + }, "uhk-agent": { "cargoLocks": null, "extract": null, @@ -223,11 +251,11 @@ "name": null, "owner": "umlaeute", "repo": "v4l2loopback", - "rev": "4aadc417254bfa3b875bf0b69278ce400ce659b2", - "sha256": "sha256-nHxIW5BmaZC6g7SElxboTcwtMDF4SCqi11MjYWsUZpo=", + "rev": "76434ab6f71d5ecbff8a218ff6bed91ea2bf73b8", + "sha256": "sha256-c6g63jW+a+v/TxLD9NnQGn/aUgivwVkxzP+hZ65w2/o=", "type": "github" }, - "version": "4aadc417254bfa3b875bf0b69278ce400ce659b2" + "version": "76434ab6f71d5ecbff8a218ff6bed91ea2bf73b8" }, "xcompose": { "cargoLocks": null, diff --git a/_sources/generated.nix b/_sources/generated.nix index 8aecd856..a77cb5d8 100644 --- a/_sources/generated.nix +++ b/_sources/generated.nix @@ -38,10 +38,10 @@ }; lesspipe = { pname = "lesspipe"; - version = "2.05"; + version = "2.06"; src = fetchurl { - url = "https://github.com/wofr06/lesspipe/archive/refs/tags/v2.05.tar.gz"; - sha256 = "sha256-eOq2PYvLB6ueIjC8Rif/p7HJKW8AkbEjf1La9/HUaC8="; + url = "https://github.com/wofr06/lesspipe/archive/refs/tags/v2.06.tar.gz"; + sha256 = "sha256-9Gx7Cwb5UWE1NaSA0iun6FY/TwjT0/jjcAR98SLLFjc="; }; }; mpv-autosave = { @@ -58,13 +58,13 @@ }; mpv-chapterskip = { pname = "mpv-chapterskip"; - version = "f4c5da3e7661212eb491cc1d85beafbf951e32f0"; + version = "b26825316e3329882206ae78dc903ebc4613f039"; src = fetchFromGitHub ({ owner = "po5"; repo = "chapterskip"; - rev = "f4c5da3e7661212eb491cc1d85beafbf951e32f0"; + rev = "b26825316e3329882206ae78dc903ebc4613f039"; fetchSubmodules = false; - sha256 = "sha256-ZK64WdhXMubzfkKFVg7rX5dPc4IVHDwN0T1X9tXwsJI="; + sha256 = "sha256-OTrLQE3rYvPQamEX23D6HttNjx3vafWdTMxTiWpDy90="; }); }; mpv-createchapter = { @@ -80,13 +80,13 @@ }; mpv-mpris = { pname = "mpv-mpris"; - version = "0.8.1"; + version = "0.9"; src = fetchFromGitHub ({ owner = "hoyon"; repo = "mpv-mpris"; - rev = "0.8.1"; + rev = "0.9"; fetchSubmodules = false; - sha256 = "sha256-ugEiQZA1vQCVwyv3ViM84Qz8lhRvy17vcxjayYevTAs="; + sha256 = "sha256-leW7oCWTnJuprVnJJ+iXd3nuB2VXl3fw8FmPxv7d6rA="; }); }; mpv-reload = { @@ -102,10 +102,18 @@ }; postfix-mta-sts-resolver = { pname = "postfix-mta-sts-resolver"; - version = "1.1.3"; + version = "1.1.4"; src = fetchurl { - url = "https://github.com/Snawoot/postfix-mta-sts-resolver/archive/refs/tags/v1.1.3.tar.gz"; - sha256 = "sha256-snvUmKZVckDNt2nnFOEa4cbGLtm825UgvA3cBpoNGLw="; + url = "https://github.com/Snawoot/postfix-mta-sts-resolver/archive/refs/tags/v1.1.4.tar.gz"; + sha256 = "sha256-3vB6krsP6G25bviG27QI+9NyJN2YKOOmM5KhKUclJPc="; + }; + }; + postfwd = { + pname = "postfwd"; + version = "2.03"; + src = fetchurl { + url = "https://github.com/postfwd/postfwd/archive/refs/tags/v2.03.tar.gz"; + sha256 = 
"sha256-mMKXzeqg2PfXkvGL7qugOelm/I2fZnUidq6/ugXDHa0="; }; }; psql-versioning = { @@ -120,6 +128,14 @@ sha256 = "sha256-j+njRssJHTdNV3FbcA3MdUmzCaJxuYBrC0qwtK3HoyY="; }; }; + smartprom = { + pname = "smartprom"; + version = "2.1.0"; + src = fetchurl { + url = "https://github.com/matusnovak/prometheus-smartctl/archive/refs/tags/v2.1.0.tar.gz"; + sha256 = "sha256-VbpFvDBygJswUfmufVjo/xXxDDmXLq/0D9ln8u+139E="; + }; + }; uhk-agent = { pname = "uhk-agent"; version = "1.5.17"; @@ -130,13 +146,13 @@ }; v4l2loopback = { pname = "v4l2loopback"; - version = "4aadc417254bfa3b875bf0b69278ce400ce659b2"; + version = "76434ab6f71d5ecbff8a218ff6bed91ea2bf73b8"; src = fetchFromGitHub ({ owner = "umlaeute"; repo = "v4l2loopback"; - rev = "4aadc417254bfa3b875bf0b69278ce400ce659b2"; + rev = "76434ab6f71d5ecbff8a218ff6bed91ea2bf73b8"; fetchSubmodules = true; - sha256 = "sha256-nHxIW5BmaZC6g7SElxboTcwtMDF4SCqi11MjYWsUZpo="; + sha256 = "sha256-c6g63jW+a+v/TxLD9NnQGn/aUgivwVkxzP+hZ65w2/o="; }); }; xcompose = { diff --git a/accounts/gkleen@sif/default.nix b/accounts/gkleen@sif/default.nix index d3db91c8..2cfaa620 100644 --- a/accounts/gkleen@sif/default.nix +++ b/accounts/gkleen@sif/default.nix @@ -258,12 +258,14 @@ in { screen-locker = { enable = true; lockCmd = toString (pkgs.writeShellScript "lock" '' + ${pkgs.playerctl}/bin/playerctl -a status | ${pkgs.gnugrep}/bin/grep -q "Playing" && exit 0 + cleanup() { ${cfg.services.dunst.package}/bin/dunstctl set-paused false } trap cleanup EXIT INT TERM - ${pkgs.playerctl}/bin/playerctl -a pause + # ${pkgs.playerctl}/bin/playerctl -a pause ${cfg.services.dunst.package}/bin/dunstctl set-paused true ${pkgs.xsecurelock}/bin/xsecurelock ''); diff --git a/hosts/surtr/email/ccert-policy-server/ccert_policy_server/__init__.py b/hosts/surtr/email/ccert-policy-server/ccert_policy_server/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hosts/surtr/email/ccert-policy-server/ccert_policy_server/__main__.py b/hosts/surtr/email/ccert-policy-server/ccert_policy_server/__main__.py new file mode 100644 index 00000000..f481090c --- /dev/null +++ b/hosts/surtr/email/ccert-policy-server/ccert_policy_server/__main__.py @@ -0,0 +1,92 @@ +from systemd.daemon import listen_fds +from sdnotify import SystemdNotifier +from socketserver import StreamRequestHandler, ThreadingMixIn +from systemd_socketserver import SystemdSocketServer +import sys +from threading import Thread +from psycopg_pool import ConnectionPool +from psycopg.rows import namedtuple_row + +import logging + + +class PolicyHandler(StreamRequestHandler): + def handle(self): + logger.debug('Handling new connection...') + + self.args = dict() + + line = None + while line := self.rfile.readline().removesuffix(b'\n'): + if b'=' not in line: + break + + key, val = line.split(sep=b'=', maxsplit=1) + self.args[key.decode()] = val.decode() + + logger.info('Connection parameters: %s', self.args) + + allowed = False + with self.server.db_pool.connection() as conn: + local, domain = self.args['sender'].split(sep='@', maxsplit=1) + extension = None + if '+' in local: + local, extension = local.split(sep='+', maxsplit=1) + + logger.debug('Parsed address: %s', {'local': local, 'extension': extension, 'domain': domain}) + + with conn.cursor() as cur: + cur.row_factory = namedtuple_row + cur.execute('SELECT "mailbox"."mailbox" as "user", "local", "extension", "domain" FROM "mailbox" INNER JOIN "mailbox_mapping" ON "mailbox".id = "mailbox_mapping"."mailbox" WHERE "mailbox"."mailbox" = %(user)s AND ("local" = %(local)s OR "local" 
IS NULL) AND ("extension" = %(extension)s OR "extension" IS NULL) AND "domain" = %(domain)s', params = {'user': self.args['ccert_subject'], 'local': local, 'extension': extension if extension is not None else '', 'domain': domain}, prepare=True) + for record in cur: + logger.debug('Received result: %s', record) + allowed = True + + action = '550 5.7.0 Sender address not authorized for current user' + if allowed: + action = 'DUNNO' + + logger.info('Reached verdict: %s', {'allowed': allowed, 'action': action}) + self.wfile.write(f'action={action}\n\n'.encode()) + +class ThreadedSystemdSocketServer(ThreadingMixIn, SystemdSocketServer): + def __init__(self, fd, RequestHandlerClass): + super().__init__(fd, RequestHandlerClass) + + self.db_pool = ConnectionPool(min_size=1) + self.db_pool.wait() + +def main(): + global logger + logger = logging.getLogger(__name__) + console_handler = logging.StreamHandler() + console_handler.setFormatter( logging.Formatter('[%(levelname)s](%(name)s): %(message)s') ) + if sys.stderr.isatty(): + console_handler.setFormatter( logging.Formatter('%(asctime)s [%(levelname)s](%(name)s): %(message)s') ) + logger.addHandler(console_handler) + logger.setLevel(logging.DEBUG) + + # log uncaught exceptions + def log_exceptions(type, value, tb): + global logger + + logger.error(value) + sys.__excepthook__(type, value, tb) # calls default excepthook + + sys.excepthook = log_exceptions + + fds = listen_fds() + servers = [ThreadedSystemdSocketServer(fd, PolicyHandler) for fd in fds] + + if servers: + for server in servers: + Thread(name=f'Server for fd{server.fileno()}', target=server.serve_forever).start() + else: + return 2 + + SystemdNotifier().notify('READY=1') + + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/hosts/surtr/email/ccert-policy-server/setup.py b/hosts/surtr/email/ccert-policy-server/setup.py new file mode 100644 index 00000000..d8eb415a --- /dev/null +++ b/hosts/surtr/email/ccert-policy-server/setup.py @@ -0,0 +1,12 @@ +from setuptools import setup, find_packages + +setup( + name = 'ccert-policy-server', + version = '0.0.0', + packages = ['ccert_policy_server'], + entry_points = { + 'console_scripts': [ + 'ccert-policy-server=ccert_policy_server.__main__:main' + ], + }, +) diff --git a/hosts/surtr/email/default.nix b/hosts/surtr/email/default.nix index 83bf02f5..9cfba1f1 100644 --- a/hosts/surtr/email/default.nix +++ b/hosts/surtr/email/default.nix @@ -1,4 +1,4 @@ -{ config, pkgs, lib, ... }: +{ config, pkgs, lib, flakeInputs, ... 
}: with lib; @@ -20,6 +20,27 @@ let ''; }; + ccert-policy-server = flakeInputs.mach-nix.lib.${config.nixpkgs.system}.buildPythonPackage { + src = ./ccert-policy-server; + pname = "ccert-policy-server"; + version = "0.0.0"; + + python = "python39"; + ignoreDataOutdated = true; + + requirements = '' + sdnotify + systemd-socketserver + psycopg >=3.0.0 + psycopg-pool >=3.0.0 + psycopg-binary >=3.0.0 + ''; + + overridesPre = [ + (self: super: { systemd-python = super.systemd.overrideAttrs (oldAttrs: { pname = "systemd-python"; }); }) + ]; + }; + spmDomains = ["bouncy.email"]; in { config = { @@ -35,7 +56,7 @@ in { }; }) ]; - + services.postfix = { enable = true; hostname = "surtr.yggdrasil.li"; @@ -187,8 +208,9 @@ in { "-o" "smtpd_tls_ask_ccert=yes" "-o" "smtpd_tls_req_ccert=yes" "-o" "smtpd_client_restrictions=permit_tls_all_clientcerts,reject" + "-o" "{smtpd_data_restrictions = check_policy_service unix:/run/postfwd3/postfwd3.sock}" "-o" "smtpd_relay_restrictions=permit_tls_all_clientcerts,reject" - "-o" "smtpd_sender_restrictions=reject_unknown_sender_domain,reject_unverified_sender" + "-o" "{smtpd_sender_restrictions = reject_unknown_sender_domain,reject_unverified_sender,check_policy_service unix:/run/postfix-ccert-sender-policy.sock}" "-o" "unverified_sender_reject_code=550" "-o" "unverified_sender_reject_reason={Sender address rejected: undeliverable address}" "-o" "smtpd_recipient_restrictions=reject_unauth_pipelining,reject_non_fqdn_recipient,reject_unknown_recipient_domain,permit_tls_all_clientcerts,reject" @@ -415,7 +437,7 @@ in { mail_plugins = $mail_plugins quota mailbox_list_index = yes postmaster_address = postmaster@yggdrasil.li - recipient_delimiter = + recipient_delimiter = auth_username_chars = abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890.-+_@ service lmtp { @@ -431,7 +453,7 @@ in { namespace inbox { separator = / inbox = yes - prefix = + prefix = mailbox Trash { auto = no @@ -602,7 +624,7 @@ in { ${pkgs.dovecot_pigeonhole}/bin/sievec $f done ''; - + serviceConfig = { LoadCredential = [ "surtr.yggdrasil.li.key.pem:${config.security.acme.certs."surtr.yggdrasil.li".directory}/key.pem" @@ -703,7 +725,7 @@ in { }; systemd.sockets.spm = { wantedBy = [ "nginx.service" ]; - + socketConfig = { ListenStream = "/run/spm/server.sock"; SocketUser = "spm"; @@ -730,5 +752,57 @@ in { enable = true; loglevel = "debug"; }; + + systemd.sockets."postfix-ccert-sender-policy" = { + requiredBy = ["postfix.service"]; + wants = ["postfix-ccert-sender-policy.service"]; + socketConfig = { + ListenStream = "/run/postfix-ccert-sender-policy.sock"; + }; + }; + systemd.services."postfix-ccert-sender-policy" = { + serviceConfig = { + Type = "notify"; + + ExecStart = "${ccert-policy-server}/bin/ccert-policy-server"; + + Environment = [ + "PGDATABASE=email" + ]; + + DynamicUser = false; + User = "postfix-ccert-sender-policy"; + Group = "postfix-ccert-sender-policy"; + ProtectSystem = "strict"; + SystemCallFilter = "@system-service"; + NoNewPrivileges = true; + ProtectKernelTunables = true; + ProtectKernelModules = true; + ProtectKernelLogs = true; + ProtectControlGroups = true; + MemoryDenyWriteExecute = true; + RestrictSUIDSGID = true; + KeyringMode = "private"; + ProtectClock = true; + RestrictRealtime = true; + PrivateDevices = true; + PrivateTmp = true; + ProtectHostname = true; + ReadWritePaths = ["/run/postgresql"]; + }; + }; + users.users."postfix-ccert-sender-policy" = { + isSystemUser = true; + group = "postfix-ccert-sender-policy"; + }; + 
users.groups."postfix-ccert-sender-policy" = {}; + + services.postfwd = { + enable = true; + rules = '' + id=RCPT01; protocol_state=DATA; protocol_state=END-OF-MESSAGE; action=rcpt(ccert_subject/100/3600/450 4.7.1 Exceeding maximum of 100 recipients per hour [$$ratecount]) + id=RCPT02; protocol_state=DATA; protocol_state=END-OF-MESSAGE; action=rcpt(ccert_subject/1000/86400/450 4.7.1 Exceeding maximum of 1000 recipients per day [$$ratecount]) + ''; + }; }; } diff --git a/hosts/surtr/postgresql.nix b/hosts/surtr/postgresql.nix index 66ce60eb..7013ae97 100644 --- a/hosts/surtr/postgresql.nix +++ b/hosts/surtr/postgresql.nix @@ -104,7 +104,7 @@ in { ALTER TABLE mailbox_mapping ALTER local TYPE citext; ALTER TABLE mailbox_mapping ALTER domain TYPE citext; - CREATE VIEW mailbox_quota_rule (id, mailbox, quota_rule) AS SELECT id, mailbox, (CASE WHEN quota_bytes IS NULL THEN '*:ignore' ELSE '*:bytes=' || quota_bytes END) AS quota_rule FROM mailbox; + CREATE VIEW mailbox_quota_rule (id, mailbox, quota_rule) AS SELECT id, mailbox, (CASE WHEN quota_bytes IS NULL THEN '*:ignore' ELSE '*:bytes=' || quota_bytes END) AS quota_rule FROM mailbox; CREATE VIEW virtual_mailbox_domain (domain) AS SELECT DISTINCT domain FROM mailbox_mapping; CREATE VIEW virtual_mailbox_mapping (lookup) AS SELECT (CASE WHEN local IS NULL THEN ''' ELSE local END) || '@' || domain AS lookup FROM mailbox_mapping; @@ -143,6 +143,15 @@ in { GRANT SELECT ON ALL TABLES IN SCHEMA public TO "spm"; COMMIT; + + BEGIN; + SELECT _v.register_patch('007-ccert-sender-policy', ARRAY['000-base'], null); + + CREATE USER "postfix-ccert-sender-policy"; + GRANT CONNECT ON DATABASE "email" TO "postfix-ccert-sender-policy"; + ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO "postfix-ccert-sender-policy"; + GRANT SELECT ON ALL TABLES IN SCHEMA public TO "postfix-ccert-sender-policy"; + COMMIT; ''} ''; }; diff --git a/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py b/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py new file mode 100644 index 00000000..e93e6a60 --- /dev/null +++ b/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py @@ -0,0 +1,202 @@ +import argparse +import os, sys, signal +from pyprctl import cap_permitted, cap_inheritable, cap_effective, cap_ambient, Cap +from pwd import getpwnam + +from datetime import datetime, timezone +from dateutil.parser import isoparse + +from xdg import xdg_runtime_dir +import unshare +from tempfile import TemporaryDirectory + +import logging + +import json +import subprocess + +import pathlib +from pathlib import Path + +from atomicwrites import atomic_write + +from traceback import format_exc + + +borg_pwd = getpwnam('borg') + +def as_borg(caps=set(), cwd=None): + if caps: + cap_permitted.add(*caps) + cap_inheritable.add(*caps) + cap_effective.add(*caps) + cap_ambient.add(*caps) + + os.setgid(borg_pwd.pw_gid) + os.setuid(borg_pwd.pw_uid) + + if cwd is not None: + os.chdir(cwd) + + +def _archive_name(snapshot, target, archive_prefix): + _, _, ts = snapshot.rpartition('@') + creation_time = isoparse(ts).astimezone(timezone.utc) + archive_name = _archive_basename(snapshot, archive_prefix) + return f'{target}::{archive_name}-{creation_time.strftime("%Y-%m-%dT%H:%M:%S")}' + +def _archive_basename(snapshot, archive_prefix): + base_name, _, _ = snapshot.rpartition('@') + return archive_prefix + base_name.replace('-', '--').replace('/', '-') + +def check(*, snapshot, target, archive_prefix, cache_file): + archives = None + if cache_file: + logger.debug('Trying cache...') + try: + with open(cache_file, 
mode='r', encoding='utf-8') as fp:
+                archives = set(json.load(fp))
+            logger.info('Loaded archive list from cache')
+        except FileNotFoundError:
+            pass
+
+    if not archives:
+        logger.info('Loading archive list from remote...')
+        with subprocess.Popen(['borg', 'list', '--info', '--lock-wait=600', '--json', target], stdout=subprocess.PIPE, preexec_fn=lambda: as_borg()) as proc:
+            archives = set([archive['barchive'] for archive in json.load(proc.stdout)['archives']])
+        if cache_file:
+            logger.debug('Saving archive list to cache...')
+            with atomic_write(cache_file, mode='w', encoding='utf-8', overwrite=True) as fp:
+                json.dump(list(archives), fp)
+
+    # logger.debug(f'archives: {archives}')
+    _, _, archive_name = _archive_name(snapshot, target, archive_prefix).partition('::')
+    if archive_name in archives:
+        logger.info(f'{archive_name} found')
+        return 0
+    else:
+        logger.info(f'{archive_name} not found')
+        return 126
+
+def create(*, snapshot, target, archive_prefix, dry_run):
+    basename = _archive_basename(snapshot, archive_prefix)
+
+    with TemporaryDirectory(prefix=f'borg-mount_{basename}_', dir=os.environ.get('RUNTIME_DIRECTORY')) as tmpdir:
+        child = os.fork()
+        if child == 0:
+            unshare.unshare(unshare.CLONE_NEWNS)
+            subprocess.run(['mount', '--make-rprivate', '/'], check=True)
+            chroot = pathlib.Path(tmpdir) / 'chroot'
+            upper = pathlib.Path(tmpdir) / 'upper'
+            work = pathlib.Path(tmpdir) / 'work'
+            for path in [chroot,upper,work]:
+                path.mkdir()
+            subprocess.run(['mount', '-t', 'overlay', 'overlay', '-o', f'lowerdir=/,upperdir={upper},workdir={work}', chroot], check=True)
+            bindMounts = ['nix', 'run', 'run/secrets.d', 'run/wrappers', 'proc', 'dev', 'sys', pathlib.Path(os.path.expanduser('~')).relative_to('/')]
+            if os.environ.get('BORG_BASE_DIR'):
+                bindMounts.append(pathlib.Path(os.environ['BORG_BASE_DIR']).relative_to('/'))
+            if 'SSH_AUTH_SOCK' in os.environ:
+                bindMounts.append(pathlib.Path(os.environ['SSH_AUTH_SOCK']).parent.relative_to('/'))
+            for bindMount in bindMounts:
+                (chroot / bindMount).mkdir(parents=True,exist_ok=True)
+                # print(*['mount', '--bind', pathlib.Path('/') / bindMount, chroot / bindMount], file=stderr)
+                subprocess.run(['mount', '--bind', pathlib.Path('/') / bindMount, chroot / bindMount], check=True)
+            os.chroot(chroot)
+            os.chdir('/')
+            dir = pathlib.Path('/borg')
+            dir.mkdir(parents=True,exist_ok=True,mode=0o0750)
+            os.chown(dir, borg_pwd.pw_uid, borg_pwd.pw_gid)
+            try:
+                subprocess.run(['mount', '-t', 'zfs', '-o', 'ro', snapshot, dir], check=True)
+                env = os.environ.copy()
+                create_args = ['borg',
+                               'create',
+                               '--lock-wait=600',
+                               '--one-file-system',
+                               '--compression=auto,zstd,10',
+                               '--chunker-params=10,23,16,4095',
+                               '--files-cache=ctime,size',
+                               '--show-rc',
+                               # '--remote-ratelimit=20480',
+                               '--progress',
+                               '--list',
+                               '--filter=AMEi-x?',
+                               '--stats' if not dry_run else '--dry-run'
+                               ]
+                _, _, ts = snapshot.rpartition('@')
+                creation_time = isoparse(ts).astimezone(timezone.utc)
+                create_args += [f'--timestamp={creation_time.strftime("%Y-%m-%dT%H:%M:%S")}']
+                env['BORG_FILES_CACHE_SUFFIX'] = basename
+                create_args += [_archive_name(snapshot, target, archive_prefix), '.']
+                print({'create_args': create_args, 'cwd': dir, 'env': env}, file=sys.stderr)
+                subprocess.run(create_args, stdin=subprocess.DEVNULL, env=env, preexec_fn=lambda: as_borg(caps={Cap.DAC_READ_SEARCH}, cwd=dir), check=True)
+                # subprocess.run(create_args, stdin=subprocess.DEVNULL, env=env, preexec_fn=lambda: None, cwd=dir, check=True)
+            finally:
+                subprocess.run(['umount', dir], check=True)
+
os._exit(0) + else: + while True: + waitpid, waitret = os.wait() + if waitret != 0: + sys.exit(waitret) + if waitpid == child: + break + return 0 + +def sigterm(signum, frame): + raise SystemExit(128 + signum) + +def main(): + signal.signal(signal.SIGTERM, sigterm) + + global logger + logger = logging.getLogger(__name__) + console_handler = logging.StreamHandler() + console_handler.setFormatter( logging.Formatter('[%(levelname)s](%(name)s): %(message)s') ) + if sys.stderr.isatty(): + console_handler.setFormatter( logging.Formatter('%(asctime)s [%(levelname)s](%(name)s): %(message)s') ) + logger.addHandler(console_handler) + + # log uncaught exceptions + def log_exceptions(type, value, tb): + global logger + + logger.error(value) + sys.__excepthook__(type, value, tb) # calls default excepthook + + sys.excepthook = log_exceptions + + parser = argparse.ArgumentParser(prog='borgsnap') + parser.add_argument('--verbose', '-v', action='count', default=0) + parser.add_argument('--target', metavar='REPO', default='yggdrasil.borgbase:repo') + parser.add_argument('--archive-prefix', metavar='REPO', default='yggdrasil.vidhar.') + subparsers = parser.add_subparsers() + subparsers.required = True + parser.set_defaults(cmd=None) + check_parser = subparsers.add_parser('check') + check_parser.add_argument('--cache-file', type=lambda p: Path(p).absolute(), default=None) + check_parser.add_argument('snapshot') + check_parser.set_defaults(cmd=check) + create_parser = subparsers.add_parser('create') + create_parser.add_argument('--dry-run', '-n', action='store_true', default=False) + create_parser.add_argument('snapshot') + create_parser.set_defaults(cmd=create) + args = parser.parse_args() + + if args.verbose <= 0: + logger.setLevel(logging.WARNING) + elif args.verbose <= 1: + logger.setLevel(logging.INFO) + else: + logger.setLevel(logging.DEBUG) + + cmdArgs = {} + for copy in {'target', 'archive_prefix', 'snapshot', 'cache_file', 'dry_run'}: + if copy in vars(args): + cmdArgs[copy] = vars(args)[copy] + + return args.cmd(**cmdArgs) + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/hosts/vidhar/borg/borgsnap/setup.py b/hosts/vidhar/borg/borgsnap/setup.py new file mode 100644 index 00000000..76356bfc --- /dev/null +++ b/hosts/vidhar/borg/borgsnap/setup.py @@ -0,0 +1,10 @@ +from setuptools import setup + +setup(name='borgsnap', + packages=['borgsnap'], + entry_points={ + 'console_scripts': [ + 'borgsnap=borgsnap.__main__:main', + ], + } +) diff --git a/hosts/vidhar/borg/copy.py b/hosts/vidhar/borg/copy.py index 4e9599b8..b9b667f2 100755 --- a/hosts/vidhar/borg/copy.py +++ b/hosts/vidhar/borg/copy.py @@ -71,7 +71,7 @@ def read_repo(path): class ToSync: to_sync = deque() - + def __iter__(self): return self @@ -267,7 +267,7 @@ def sigterm(signum, frame): def main(): signal.signal(signal.SIGTERM, sigterm) - + if "::" in args.source: (src_repo_path, _, src_archive) = args.source.partition("::") entry = None diff --git a/hosts/vidhar/borg/default.nix b/hosts/vidhar/borg/default.nix index 579630a9..650c91ee 100644 --- a/hosts/vidhar/borg/default.nix +++ b/hosts/vidhar/borg/default.nix @@ -1,23 +1,28 @@ -{ config, pkgs, lib, ... }: +{ config, pkgs, lib, flakeInputs, ... 
}: with lib; let + sshConfig = pkgs.writeText "config" '' + Include /etc/ssh/ssh_config + + ControlMaster auto + ControlPath /var/lib/borg/.borgssh-master-%r@%n:%p + ControlPersist yes + + Host yggdrasil.borgbase + HostName nx69hpl8.repo.borgbase.com + User nx69hpl8 + IdentityFile ${config.sops.secrets."append.borgbase".path} + IdentitiesOnly yes + + BatchMode yes + ServerAliveInterval 10 + ServerAliveCountMax 30 + ''; + copyService = { repo, repoEscaped }: let serviceName = "copy-borg@${repoEscaped}"; - sshConfig = pkgs.writeText "config" '' - Include /etc/ssh/ssh_config - - Host yggdrasil.borgbase - HostName nx69hpl8.repo.borgbase.com - User nx69hpl8 - IdentityFile ${config.sops.secrets."append.borgbase".path} - IdentitiesOnly yes - - BatchMode yes - ServerAliveInterval 10 - ServerAliveCountMax 30 - ''; in nameValuePair serviceName { serviceConfig = { Type = "oneshot"; @@ -72,8 +77,63 @@ let --prefix PATH : ${makeBinPath (with pkgs; [utillinux borgbackup])}:${config.security.wrapperDir} ''; }); + + borgsnap = flakeInputs.mach-nix.lib.${config.nixpkgs.system}.buildPythonPackage rec { + pname = "borgsnap"; + src = ./borgsnap; + version = "0.0.0"; + ignoreDataOutdated = true; + + requirements = '' + atomicwrites + pyprctl + python-unshare + xdg + python-dateutil + ''; + postInstall = '' + wrapProgram $out/bin/borgsnap \ + --prefix PATH : ${makeBinPath (with pkgs; [utillinux borgbackup])}:${config.security.wrapperDir} + ''; + + providers.python-unshare = "nixpkgs"; + overridesPre = [ + (self: super: { python-unshare = super.python-unshare.overrideAttrs (oldAttrs: { name = "python-unshare-0.2.1"; version = "0.2.1"; }); }) + ]; + + _.xdg.buildInputs.add = with pkgs."python3Packages"; [ poetry ]; + _.tomli.buildInputs.add = with pkgs."python3Packages"; [ flit-core ]; + }; in { config = { + services.zfssnap.config.exec = { + check = "${borgsnap}/bin/borgsnap -vvv --target yggdrasil.borgbase:repo --archive-prefix yggdrasil.vidhar. check --cache-file /run/zfssnap-prune/archives-cache.json"; + cmd = "${borgsnap}/bin/borgsnap -vvv --target yggdrasil.borgbase:repo --archive-prefix yggdrasil.vidhar. create --dry-run"; + + halfweekly = "8"; + monthly = "-1"; + }; + + systemd.services = { + "zfssnap-prune" = { + serviceConfig = { + Environment = [ + "BORG_RSH=\"${pkgs.openssh}/bin/ssh -F ${sshConfig}\"" + "BORG_BASE_DIR=/var/lib/borg" + "BORG_CONFIG_DIR=/var/lib/borg/config" + "BORG_CACHE_DIR=/var/lib/borg/cache" + "BORG_SECURITY_DIR=/var/lib/borg/security" + "BORG_KEYS_DIR=/var/lib/borg/keys" + "BORG_KEY_FILE=${config.sops.secrets."yggdrasil.borgkey".path}" + "BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes" + "BORG_HOSTNAME_IS_UNIQUE=yes" + ]; + RuntimeDirectory = "zfssnap-prune"; + }; + }; + } // listToAttrs (map copyService [{ repo = "/srv/backup/borg/jotnar"; repoEscaped = "srv-backup-borg-jotnar"; }]); + + services.borgbackup.repos.jotnar = { path = "/srv/backup/borg/jotnar"; authorizedKeysAppendOnly = let @@ -111,11 +171,9 @@ in { mode = "0400"; }; - systemd.services = listToAttrs (map copyService [{ repo = "/srv/backup/borg/jotnar"; repoEscaped = "srv-backup-borg-jotnar"; }]); - systemd.timers."copy-borg@srv-backup-borg-jotnar" = { wantedBy = ["multi-user.target"]; - + timerConfig = { OnCalendar = "*-*-* 00/4:00:00 Europe/Berlin"; }; diff --git a/hosts/vidhar/default.nix b/hosts/vidhar/default.nix index 121cc9df..3f5d17d5 100644 --- a/hosts/vidhar/default.nix +++ b/hosts/vidhar/default.nix @@ -1,4 +1,7 @@ { hostName, flake, config, pkgs, lib, ... 
}: + +with lib; + { imports = with flake.nixosModules.systemProfiles; [ ./zfs.nix ./network ./samba.nix ./dns ./prometheus ./borg @@ -39,7 +42,7 @@ luks.devices = { nvm0 = { device = "/dev/disk/by-label/${hostName}-nvm0"; bypassWorkqueues = true; }; nvm1 = { device = "/dev/disk/by-label/${hostName}-nvm1"; bypassWorkqueues = true; }; - + hdd0.device = "/dev/disk/by-label/${hostName}-hdd0"; hdd1.device = "/dev/disk/by-label/${hostName}-hdd1"; hdd2.device = "/dev/disk/by-label/${hostName}-hdd2"; @@ -58,7 +61,7 @@ options = [ "mode=0755" ]; }; }; - + services.timesyncd.enable = false; services.chrony = { enable = true; @@ -132,6 +135,7 @@ access_log syslog:server=unix:/dev/log main; error_log syslog:server=unix:/dev/log info; + client_body_buffer_size 16m; client_body_temp_path /run/nginx-client-bodies; ''; upstreams.grafana = { @@ -173,12 +177,12 @@ sopsFile = ./selfsigned.key; }; systemd.services.nginx = { - preStart = lib.mkForce config.services.nginx.preStart; + preStart = mkForce config.services.nginx.preStart; serviceConfig = { - ExecReload = lib.mkForce "${pkgs.coreutils}/bin/kill -HUP $MAINPID"; + ExecReload = mkForce "${pkgs.coreutils}/bin/kill -HUP $MAINPID"; LoadCredential = [ "selfsigned.key:${config.sops.secrets."selfsigned.key".path}" ]; - RuntimeDirectory = lib.mkForce [ "nginx" "nginx-client-bodies" ]; + RuntimeDirectory = mkForce [ "nginx" "nginx-client-bodies" ]; RuntimeDirectoryMode = "0750"; }; }; @@ -232,7 +236,7 @@ }; }; systemd.services.loki.preStart = let - rulesYaml = lib.generators.toYAML {} { + rulesYaml = generators.toYAML {} { groups = [ { name = "power-failures"; rules = [ @@ -311,6 +315,29 @@ timers.wants = ["systemd-tmpfiles-clean.timer"]; }; + services.smartd = { + enable = true; + autodetect = false; + defaults.monitored = "-a -o on -s (S/../.././02|L/../../7/04)"; + devices = map (dev: { device = "/dev/disk/by-path/${dev}"; }) [ + "pci-0000:00:1f.2-ata-1" + "pci-0000:00:1f.2-ata-3" + "pci-0000:00:1f.2-ata-4" + "pci-0000:00:1f.2-ata-5" + "pci-0000:00:1f.2-ata-6" + "pci-0000:02:00.0-nvme-1" + "pci-0000:05:00.0-sas-phy0-lun-0" + "pci-0000:05:00.0-sas-phy1-lun-0" + "pci-0000:06:00.0-nvme-1" + ]; + notifications = { + test = false; + mail.enable = false; + x11.enable = false; + wall.enable = false; + }; + }; + environment.systemPackages = with pkgs; [iotop vmtouch]; system.stateVersion = "21.05"; diff --git a/hosts/vidhar/prometheus/default.nix b/hosts/vidhar/prometheus/default.nix index 4c23d8a9..7ac86c30 100644 --- a/hosts/vidhar/prometheus/default.nix +++ b/hosts/vidhar/prometheus/default.nix @@ -34,20 +34,6 @@ in { enable = true; enabledCollectors = []; }; - smartctl = { - enable = true; - devices = map (dev: "/dev/disk/by-path/${dev}") [ - "pci-0000:00:1f.2-ata-1" - "pci-0000:00:1f.2-ata-3" - "pci-0000:00:1f.2-ata-4" - "pci-0000:00:1f.2-ata-5" - "pci-0000:00:1f.2-ata-6" - "pci-0000:02:00.0-nvme-1" - "pci-0000:05:00.0-sas-phy0-lun-0" - "pci-0000:05:00.0-sas-phy1-lun-0" - "pci-0000:06:00.0-nvme-1" - ]; - }; snmp = { enable = true; configurationPath = ./snmp.yml; @@ -124,10 +110,10 @@ in { } { job_name = "smartctl"; static_configs = [ - { targets = ["localhost:${toString config.services.prometheus.exporters.smartctl.port}"]; } + { targets = ["localhost:9633"]; } ]; relabel_configs = relabelHosts; - scrape_interval = "1s"; + scrape_interval = "60s"; } { job_name = "snmp"; static_configs = [ @@ -376,6 +362,30 @@ in { }; }; + systemd.services."prometheus-smartctl-exporter" = { + wantedBy = [ "multi-user.target" ]; + after = [ "network.target" ]; + path = with pkgs; 
[ smartmontools ]; + serviceConfig = { + Restart = "always"; + + CapabilityBoundingSet = ["CAP_DAC_OVERRIDE" "CAP_SYS_RAWIO" "CAP_SYS_ADMIN"]; + AmbientCapabilities = ["CAP_DAC_OVERRIDE" "CAP_SYS_RAWIO" "CAP_SYS_ADMIN"]; + ProtectSystem = "strict"; + DynamicUser = true; + LockPersonality = true; + MemoryDenyWriteExecute = true; + NoNewPrivileges = true; + PrivateDevices = false; + PrivateTmp = true; + ProcSubset = "pid"; + + Type = "simple"; + ExecStart = "${pkgs.smartprom}/bin/smartprom"; + Environment = "SMARTCTL_EXPORTER_PORT=9633"; + }; + }; + systemd.services."prometheus-systemd-exporter" = let cfg = config.services.prometheus.exporters.systemd; in { @@ -385,14 +395,6 @@ in { ''; }; - systemd.services."prometheus-smartctl-exporter" = { - serviceConfig = { - DeviceAllow = lib.mkForce config.services.prometheus.exporters.smartctl.devices; - CapabilityBoundingSet = lib.mkForce ["CAP_SYS_ADMIN"]; - AmbientCapabilities = lib.mkForce ["CAP_SYS_ADMIN"]; - }; - }; - services.nginx = { upstreams.prometheus = { servers = { "localhost:${toString config.services.prometheus.port}" = {}; }; diff --git a/hosts/vidhar/zfs.nix b/hosts/vidhar/zfs.nix index ef285536..52b48aca 100644 --- a/hosts/vidhar/zfs.nix +++ b/hosts/vidhar/zfs.nix @@ -130,7 +130,21 @@ echo "=== ZPOOL IMPORT COMPLETE ===" ''; - services.zfssnap.enable = true; + services.zfssnap = { + enable = true; + config.keep = { + within = "15m"; + "5m" = "48"; + "15m" = "32"; + hourly = "48"; + "4h" = "24"; + "12h" = "12"; + daily = "62"; + halfweekly = "32"; + weekly = "24"; + monthly = "-1"; + }; + }; services.zfs.trim.enable = false; services.zfs.autoScrub = { enable = true; diff --git a/modules/postfwd.nix b/modules/postfwd.nix new file mode 100644 index 00000000..4afea0a1 --- /dev/null +++ b/modules/postfwd.nix @@ -0,0 +1,65 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.postfwd; +in { + options = { + services.postfwd = with types; { + enable = mkEnableOption "postfwd3 - postfix firewall daemon"; + + rules = mkOption { + type = lines; + default = ""; + }; + }; + }; + + config = mkIf cfg.enable { + systemd.services.postfwd = { + description = "postfwd3 - postfix firewall daemon"; + wantedBy = ["multi-user.target"]; + before = ["postfix.service"]; + + serviceConfig = { + Type = "forking"; + + ExecStart = "${pkgs.postfwd}/bin/postfwd3 ${escapeShellArgs [ + "-vv" + "--daemon" "--user" "postfwd" "--group" "postfwd" + "--pidfile" "/run/postfwd3/postfwd3.pid" + "--proto" "unix" + "--port" "/run/postfwd3/postfwd3.sock" + "--save_rates" "/var/lib/postfwd/rates" + "--file" (pkgs.writeText "postfwd3-rules" cfg.rules) + ]}"; + PIDFile = "/run/postfwd3/postfwd3.pid"; + + Restart = "always"; + RestartSec = 5; + TimeoutSec = 10; + + RuntimeDirectory = ["postfwd3"]; + StateDirectory = ["postfwd"]; + + DynamicUser = true; + ProtectSystem = "strict"; + SystemCallFilter = "@system-service"; + NoNewPrivileges = true; + ProtectKernelTunables = true; + ProtectKernelModules = true; + ProtectKernelLogs = true; + ProtectControlGroups = true; + MemoryDenyWriteExecute = true; + RestrictSUIDSGID = true; + KeyringMode = "private"; + ProtectClock = true; + RestrictRealtime = true; + PrivateDevices = true; + PrivateTmp = true; + ProtectHostname = true; + }; + }; + }; +} diff --git a/modules/zfssnap/default.nix b/modules/zfssnap/default.nix index d1080e8a..f3e2f9c2 100644 --- a/modules/zfssnap/default.nix +++ b/modules/zfssnap/default.nix @@ -1,7 +1,7 @@ { config, pkgs, lib, ... 
}: with lib; - + let zfssnap = pkgs.stdenv.mkDerivation rec { name = "zfssnap"; @@ -37,7 +37,7 @@ in { options = { services.zfssnap = { enable = mkEnableOption "zfssnap service"; - + config = mkOption { type = with types; attrsOf (attrsOf str); default = { @@ -82,7 +82,7 @@ in { ExecStart = let mkSectionName = name: strings.escape [ "[" "]" ] (strings.toUpper name); zfssnapConfig = generators.toINI { inherit mkSectionName; } cfg.config; - in "${zfssnap}/bin/zfssnap -v prune --config=${pkgs.writeText "zfssnap.ini" zfssnapConfig}"; + in "${zfssnap}/bin/zfssnap -vv prune --config=${pkgs.writeText "zfssnap.ini" zfssnapConfig}"; }; }; diff --git a/modules/zfssnap/zfssnap.py b/modules/zfssnap/zfssnap.py index 21ed1d5b..a8dae75f 100644 --- a/modules/zfssnap/zfssnap.py +++ b/modules/zfssnap/zfssnap.py @@ -3,9 +3,9 @@ import csv import subprocess import io -from distutils.util import strtobool +from distutils.util import strtobool from datetime import datetime, timezone, timedelta -from dateutil.tz import gettz, tzlocal +from dateutil.tz import gettz, tzutc import pytimeparse import argparse import re @@ -27,6 +27,36 @@ from math import floor import asyncio +from dataclasses import dataclass + + +TIME_PATTERNS = OrderedDict([ + ("secondly", lambda t: t.strftime('%Y-%m-%d %H:%M:%S')), + ("minutely", lambda t: t.strftime('%Y-%m-%d %H:%M')), + ("5m", lambda t: (t.strftime('%Y-%m-%d %H'), floor(t.minute / 5) * 5)), + ("15m", lambda t: (t.strftime('%Y-%m-%d %H'), floor(t.minute / 15) * 15)), + ("hourly", lambda t: t.strftime('%Y-%m-%d %H')), + ("4h", lambda t: (t.strftime('%Y-%m-%d'), floor(t.hour / 4) * 4)), + ("12h", lambda t: (t.strftime('%Y-%m-%d'), floor(t.hour / 12) * 12)), + ("daily", lambda t: t.strftime('%Y-%m-%d')), + ("halfweekly", lambda t: (t.strftime('%G-%V'), floor(int(t.strftime('%u')) / 4) * 4)), + ("weekly", lambda t: t.strftime('%G-%V')), + ("monthly", lambda t: t.strftime('%Y-%m')), + ("yearly", lambda t: t.strftime('%Y')), +]) + +@dataclass(eq=True, order=True, frozen=True) +class Snap: + name: str + creation: datetime + +@dataclass(eq=True, order=True, frozen=True) +class KeptBecause: + rule: str + ix: int + base: str + period: str + @cache def _now(): @@ -42,56 +72,120 @@ def _log_cmd(*args): def _get_items(): items = {} - + args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', '-t', 'filesystem,volume', '-s', 'local,default,inherited,temporary,received', 'li.yggdrasil:auto-snapshot'] _log_cmd(*args) with subprocess.Popen(args, stdout=subprocess.PIPE) as proc: text_stdout = io.TextIOWrapper(proc.stdout) - reader = csv.reader(text_stdout, delimiter='\t', quoting=csv.QUOTE_NONE) - Row = namedtuple('Row', ['name', 'setting']) - for row in map(Row._make, reader): + reader = csv.DictReader(text_stdout, fieldnames=['name', 'setting'], delimiter='\t', quoting=csv.QUOTE_NONE) + Row = namedtuple('Row', reader.fieldnames) + for row in [Row(**data) for data in reader]: items[row.name] = bool(strtobool(row.setting)) return items - -def prune(config, dry_run, keep_newest): - prunable_snapshots = set() - args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', '-t', 'snapshot', '-s', 'local', 'li.yggdrasil:is-auto-snapshot'] - _log_cmd(*args) - with subprocess.Popen(args, stdout=subprocess.PIPE) as proc: - text_stdout = io.TextIOWrapper(proc.stdout) - reader = csv.reader(text_stdout, delimiter='\t', quoting=csv.QUOTE_NONE) - Row = namedtuple('Row', ['name', 'is_auto_snapshot']) - for row in map(Row._make, reader): - if bool(strtobool(row.is_auto_snapshot)): - prunable_snapshots.add(row.name) - - 
items = defaultdict(list) - Snap = namedtuple('Snap', ['name', 'creation']) - args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', '-t', 'snapshot', 'creation'] + +def _get_snaps(only_auto=True): + snapshots = defaultdict(list) + args = ['zfs', 'list', '-H', '-p', '-t', 'snapshot', '-o', 'name,li.yggdrasil:is-auto-snapshot,creation'] _log_cmd(*args) with subprocess.Popen(args, stdout=subprocess.PIPE) as proc: text_stdout = io.TextIOWrapper(proc.stdout) - reader = csv.reader(text_stdout, delimiter='\t', quoting=csv.QUOTE_NONE) - Row = namedtuple('Row', ['name', 'timestamp']) - for row in map(Row._make, reader): - if row.name not in prunable_snapshots: + reader = csv.DictReader(text_stdout, fieldnames=['name', 'is_auto_snapshot', 'timestamp'], delimiter='\t', quoting=csv.QUOTE_NONE) + Row = namedtuple('Row', reader.fieldnames) + for row in [Row(**data) for data in reader]: + if only_auto and not bool(strtobool(row.is_auto_snapshot)): continue base_name, _, _ = row.name.rpartition('@') creation = datetime.fromtimestamp(int(row.timestamp), timezone.utc) - items[base_name].append(Snap(name=row.name, creation=creation)) + snapshots[base_name].append(Snap(name=row.name, creation=creation)) + + return snapshots + +def prune(config, dry_run, keep_newest, do_exec): + do_exec = do_exec and 'EXEC' in config + prune_timezone = config.gettimezone('KEEP', 'timezone', fallback=tzutc()) + logger.debug(f'prune timezone: {prune_timezone}') + + items = _get_snaps() + + exec_candidates = set() + if do_exec: + exec_timezone = config.gettimezone('EXEC', 'timezone', fallback=prune_timezone) + logger.debug(f'exec timezone: {exec_timezone}') + + for rule, pattern in TIME_PATTERNS.items(): + desired_count = config.getint('EXEC', rule, fallback=0) + + for base, snaps in items.items(): + periods = OrderedDict() + + for snap in sorted(snaps, key=lambda snap: snap.creation): + period = pattern(snap.creation.astimezone(exec_timezone)) + if period not in periods: + periods[period] = deque() + periods[period].append(snap) + + to_exec = desired_count + ordered_periods = periods.items() + for period, period_snaps in ordered_periods: + if to_exec == 0: + break + + for snap in period_snaps: + exec_candidates.add(snap) + logger.debug(f'{snap.name} is exec candidate') + to_exec -= 1 + break + + if to_exec > 0: + logger.debug(f'Missing {to_exec} to fulfill exec {rule}={desired_count} for ‘{base}’') + + check_cmd = config.get('EXEC', 'check', fallback=None) + if check_cmd: + already_execed = set() + for snap in exec_candidates: + args = [] + args += shlex.split(check_cmd) + args += [snap.name] + _log_cmd(*args) + check_res = subprocess.run(args) + if check_res.returncode == 0: + already_execed.add(snap) + logger.debug(f'{snap.name} already execed') + exec_candidates -= already_execed + + exec_cmd = config.get('EXEC', 'cmd', fallback=None) + exec_count = config.getint('EXEC', 'count', fallback=1) + if exec_cmd: + execed = set() + for snap in sorted(exec_candidates, key=lambda snap: snap.creation): + if len(execed) >= exec_count: + logger.debug(f'exc_count of {exec_count} reached') + break + + args = [] + args += shlex.split(exec_cmd) + args += [snap.name] + _log_cmd(*args) + subprocess.run(args).check_returncode() + execed.add(snap) + + exec_candidates -= execed kept_count = defaultdict(lambda: defaultdict(lambda: 0)) - KeptBecause = namedtuple('KeptBecause', ['rule', 'ix', 'base', 'period']) kept_because = OrderedDict() def keep_because(base, snap, rule, period=None): - nonlocal KeptBecause, kept_count, kept_because + nonlocal 
kept_count, kept_because kept_count[rule][base] += 1 if snap not in kept_because: kept_because[snap] = deque() kept_because[snap].append(KeptBecause(rule=rule, ix=kept_count[rule][base], base=base, period=period)) + for candidate in exec_candidates: + base_name, _, _ = candidate.name.rpartition('@') + keep_because(base_name, candidate.name, 'exec-candidate') + within = config.gettimedelta('KEEP', 'within') if within > timedelta(seconds=0): for base, snaps in items.items(): @@ -109,31 +203,14 @@ def prune(config, dry_run, keep_newest): else: logger.warn('Skipping rule ‘within’ since retention period is zero') - prune_timezone = config.gettimezone('KEEP', 'timezone', fallback=tzlocal) - - PRUNING_PATTERNS = OrderedDict([ - ("secondly", lambda t: t.strftime('%Y-%m-%d %H:%M:%S')), - ("minutely", lambda t: t.strftime('%Y-%m-%d %H:%M')), - ("5m", lambda t: (t.strftime('%Y-%m-%d %H'), floor(t.minute / 5) * 5)), - ("15m", lambda t: (t.strftime('%Y-%m-%d %H'), floor(t.minute / 15) * 15)), - ("hourly", lambda t: t.strftime('%Y-%m-%d %H')), - ("4h", lambda t: (t.strftime('%Y-%m-%d'), floor(t.hour / 4) * 4)), - ("12h", lambda t: (t.strftime('%Y-%m-%d'), floor(t.hour / 12) * 12)), - ("daily", lambda t: t.strftime('%Y-%m-%d')), - ("halfweekly", lambda t: (t.strftime('%G-%V'), floor(int(t.strftime('%u')) / 4) * 4)), - ("weekly", lambda t: t.strftime('%G-%V')), - ("monthly", lambda t: t.strftime('%Y-%m')), - ("yearly", lambda t: t.strftime('%Y')), - ]) - - for rule, pattern in PRUNING_PATTERNS.items(): + for rule, pattern in TIME_PATTERNS.items(): desired_count = config.getint('KEEP', rule, fallback=0) for base, snaps in items.items(): periods = OrderedDict() - + for snap in sorted(snaps, key=lambda snap: snap.creation, reverse=keep_newest): - period = pattern(snap.creation) + period = pattern(snap.creation.astimezone(prune_timezone)) if period not in periods: periods[period] = deque() periods[period].append(snap) @@ -150,7 +227,7 @@ def prune(config, dry_run, keep_newest): break if to_keep > 0: - logger.debug(f'Missing {to_keep} to fulfill {rule}={desired_count} for ‘{base}’') + logger.debug(f'Missing {to_keep} to fulfill prune {rule}={desired_count} for ‘{base}’') for snap, reasons in kept_because.items(): reasons_str = ', '.join(map(str, reasons)) @@ -171,16 +248,16 @@ def prune(config, dry_run, keep_newest): logger.info(f'Would have pruned ‘{snap}’') else: logger.info(f'Pruned ‘{snap}’') - + def rename(snapshots, destroy=False, set_is_auto=False): args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', 'creation', *snapshots] _log_cmd(*args) renamed_to = set() with subprocess.Popen(args, stdout=subprocess.PIPE) as proc: text_stdout = io.TextIOWrapper(proc.stdout) - reader = csv.reader(text_stdout, delimiter='\t', quoting=csv.QUOTE_NONE) - Row = namedtuple('Row', ['name', 'timestamp']) - for row in map(Row._make, reader): + reader = csv.DictReader(text_stdout, fieldnames=['name', 'timestamp'], delimiter='\t', quoting=csv.QUOTE_NONE) + Row = namedtuple('Row', reader.fieldnames) + for row in [Row(**data) for data in reader]: creation = datetime.fromtimestamp(int(row.timestamp), timezone.utc) base_name, _, _ = row.name.rpartition('@') new_name = _snap_name(base_name, time=creation) @@ -217,7 +294,7 @@ def autosnap(): all_snap_names = set() async def do_snapshot(*snap_items, recursive=False): nonlocal items, all_snap_names - snap_names = {_snap_name(item) for item in snap_items} + snap_names = {_snap_name(item) for item in snap_items if items[item]} if recursive: for snap_item in snap_items: all_snap_names 
|= {_snap_name(item) for item in items if item.startswith(snap_item)} @@ -268,7 +345,7 @@ def main(): sys.__excepthook__(type, value, tb) # calls default excepthook sys.excepthook = log_exceptions - + parser = argparse.ArgumentParser(prog='zfssnap') parser.add_argument('--verbose', '-v', action='count', default=0) subparsers = parser.add_subparsers() @@ -282,6 +359,7 @@ def main(): prune_parser.add_argument('--config', '-c', dest='config_files', nargs='*', default=list()) prune_parser.add_argument('--dry-run', '-n', action='store_true', default=False) prune_parser.add_argument('--keep-newest', action='store_true', default=False) + prune_parser.add_argument('--no-exec', dest='do_exec', action='store_false', default=True) prune_parser.set_defaults(cmd=prune) args = parser.parse_args() @@ -293,7 +371,7 @@ def main(): logger.setLevel(logging.DEBUG) cmdArgs = {} - for copy in {'snapshots', 'dry_run', 'destroy', 'keep_newest', 'set_is_auto'}: + for copy in {'snapshots', 'dry_run', 'destroy', 'keep_newest', 'set_is_auto', 'do_exec'}: if copy in vars(args): cmdArgs[copy] = vars(args)[copy] if 'config_files' in vars(args): @@ -308,7 +386,7 @@ def main(): }) search_files = args.config_files if args.config_files else [*BaseDirectory.load_config_paths('zfssnap.ini')] read_files = config.read(search_files) - + def format_config_files(files): if not files: return 'no files' @@ -323,4 +401,5 @@ def main(): args.cmd(**cmdArgs) -sys.exit(main()) +if __name__ == '__main__': + sys.exit(main()) diff --git a/nvfetcher.toml b/nvfetcher.toml index b05862a7..cb460076 100644 --- a/nvfetcher.toml +++ b/nvfetcher.toml @@ -58,4 +58,14 @@ fetch.url = "https://github.com/wofr06/lesspipe/archive/refs/tags/v$ver.tar.gz" [postfix-mta-sts-resolver] src.github = "Snawoot/postfix-mta-sts-resolver" src.prefix = "v" -fetch.url = "https://github.com/Snawoot/postfix-mta-sts-resolver/archive/refs/tags/v$ver.tar.gz" \ No newline at end of file +fetch.url = "https://github.com/Snawoot/postfix-mta-sts-resolver/archive/refs/tags/v$ver.tar.gz" + +[smartprom] +src.github = "matusnovak/prometheus-smartctl" +src.prefix = "v" +fetch.url = "https://github.com/matusnovak/prometheus-smartctl/archive/refs/tags/v$ver.tar.gz" + +[postfwd] +src.github_tag = "postfwd/postfwd" +src.prefix = "v" +fetch.url = "https://github.com/postfwd/postfwd/archive/refs/tags/v$ver.tar.gz" \ No newline at end of file diff --git a/overlays/postfwd.nix b/overlays/postfwd.nix new file mode 100644 index 00000000..8a4f4bd8 --- /dev/null +++ b/overlays/postfwd.nix @@ -0,0 +1,32 @@ +{ final, prev, sources, ... }: +let + deps = with final.perlPackages; [NetDNS NetServer IOMultiplex NetAddrIP NetCIDRLite DigestMD5 TimeHiRes Storable]; +in { + postfwd = prev.stdenv.mkDerivation rec { + inherit (sources.postfwd) pname version src; + + nativeBuildInputs = with prev; [ makeWrapper ]; + propagatedBuildInputs = [final.perlPackages.perl] ++ deps; + + buildPhase = '' + runHook preBuild + + substituteInPlace sbin/postfwd3 \ + --replace "/usr/bin/perl -T" "/usr/bin/perl" + + runHook postBuild + ''; + + installPhase = '' + runHook preInstall + + mkdir -p $out/bin + cp -t $out/bin sbin/postfwd3 + + wrapProgram $out/bin/postfwd3 \ + --prefix PERL5LIB : ${final.perlPackages.makePerlPath deps} + + runHook postInstall + ''; + }; +} diff --git a/overlays/smartprom/default.nix b/overlays/smartprom/default.nix new file mode 100644 index 00000000..0dd0771b --- /dev/null +++ b/overlays/smartprom/default.nix @@ -0,0 +1,19 @@ +{ final, prev, flakeInputs, sources, ... 
}: +{ + smartprom = flakeInputs.mach-nix.lib.${final.system}.buildPythonPackage rec { + inherit (sources.smartprom) src pname version; + ignoreDataOutdated = true; + + prePatch = '' + mkdir smartprom + mv smartprom.py smartprom/__main__.py + echo >> smartprom/__init__.py + + substituteAll ${./setup.py} ./setup.py + ''; + + requirements = '' + prometheus_client + ''; + }; +} diff --git a/overlays/smartprom/setup.py b/overlays/smartprom/setup.py new file mode 100644 index 00000000..c30fc557 --- /dev/null +++ b/overlays/smartprom/setup.py @@ -0,0 +1,11 @@ +from setuptools import setup + +setup(name='@pname@', + version='@version@', + packages=['@pname@'], + entry_points={ + 'console_scripts': [ + '@pname@=@pname@.__main__:main', + ], + } +) diff --git a/overlays/worktime/worktime.py b/overlays/worktime/worktime.py index 9cfc6cd4..1fc00061 100755 --- a/overlays/worktime/worktime.py +++ b/overlays/worktime/worktime.py @@ -117,6 +117,7 @@ class Worktime(object): force_day_to_work = True leave_days = set() leave_budget = dict() + time_per_day = None @staticmethod def holidays(year): @@ -151,10 +152,10 @@ class Worktime(object): def __init__(self, start_datetime=None, end_datetime=None, now=None, include_running=True, force_day_to_work=True, **kwargs): self.include_running = include_running self.force_day_to_work = force_day_to_work - + if now: self.now = now - + config = Worktime.config() config_dir = BaseDirectory.load_first_config('worktime') api = TogglAPI(api_token=config['TOGGL']['ApiToken'], workspace_id=config['TOGGL']['Workspace']) @@ -174,17 +175,17 @@ class Worktime(object): except IOError as e: if e.errno != 2: raise e - + hours_per_week = float(config.get('WORKTIME', 'HoursPerWeek', fallback=40)) workdays = set([int(d.strip()) for d in config.get('WORKTIME', 'Workdays', fallback='1,2,3,4,5').split(',')]) - time_per_day = timedelta(hours = hours_per_week) / len(workdays) + self.time_per_day = timedelta(hours = hours_per_week) / len(workdays) holidays = dict() leave_per_year = int(config.get('WORKTIME', 'LeavePerYear', fallback=30)) for year in range(start_date.year, end_date.year + 1): - holidays |= {k: v * time_per_day for k, v in Worktime.holidays(year).items()} + holidays |= {k: v * self.time_per_day for k, v in Worktime.holidays(year).items()} leave_frac = 1 if date(year, 1, 1) < start_date.date(): leave_frac = (date(year + 1, 1, 1) - start_date.date()) / (date(year + 1, 1, 1) - date(year, 1, 1)) @@ -199,7 +200,7 @@ class Worktime(object): day = datetime.strptime(datestr, date_format).replace(tzinfo=tzlocal()).date() if day != start_date.date(): continue - + self.leave_budget[day.year] = (self.leave_budget[day.year] if day.year in self.leave_budget else 0) + int(count) except IOError as e: if e.errno != 2: @@ -224,7 +225,7 @@ class Worktime(object): toDay = parse_single(toDay) else: fromDay = toDay = parse_single(datestr) - time = time_per_day + time = self.time_per_day if len(splitLine) == 2: [hours, datestr] = splitLine time = timedelta(hours = float(hours)) @@ -236,7 +237,7 @@ class Worktime(object): if end_date.date() < day or day < start_date.date(): continue - if excused_kind == 'leave' and not (day in holidays and holidays[day] >= time_per_day) and day.isoweekday() in workdays: + if excused_kind == 'leave' and not (day in holidays and holidays[day] >= self.time_per_day) and day.isoweekday() in workdays: self.leave_days.add(day) holidays[day] = time except IOError as e: @@ -244,7 +245,7 @@ class Worktime(object): raise e pull_forward = dict() - + start_day = start_date.date() 
end_day = end_date.date() @@ -271,7 +272,7 @@ class Worktime(object): if not d == datetime.strptime(c, date_format).replace(tzinfo=tzlocal()).date(): break else: if d >= end_date.date(): - pull_forward[d] = min(timedelta(hours = float(hours)), time_per_day - (holidays[d] if d in holidays else timedelta())) + pull_forward[d] = min(timedelta(hours = float(hours)), self.time_per_day - (holidays[d] if d in holidays else timedelta())) except IOError as e: if e.errno != 2: raise e @@ -280,10 +281,10 @@ class Worktime(object): if pull_forward: end_day = max(end_day, max(list(pull_forward))) - + for day in [start_day + timedelta(days = x) for x in range(0, (end_day - start_day).days + 1)]: if day.isoweekday() in workdays: - time_to_work = time_per_day + time_to_work = self.time_per_day if day in holidays.keys(): time_to_work -= holidays[day] if time_to_work > timedelta(): @@ -302,7 +303,7 @@ class Worktime(object): day = datetime.strptime(datestr, date_format).replace(tzinfo=tzlocal()).date() extra_days_to_work[day] = timedelta(hours = float(hours)) else: - extra_days_to_work[datetime.strptime(stripped_line, date_format).replace(tzinfo=tzlocal()).date()] = time_per_day + extra_days_to_work[datetime.strptime(stripped_line, date_format).replace(tzinfo=tzlocal()).date()] = self.time_per_day except IOError as e: if e.errno != 2: raise e @@ -329,15 +330,15 @@ class Worktime(object): extra_day_time_left = timedelta() for extra_day in extra_days_forward: - day_time = max(timedelta(), time_per_day - extra_days_to_work[extra_day]) + day_time = max(timedelta(), self.time_per_day - extra_days_to_work[extra_day]) extra_day_time_left += day_time extra_day_time = min(extra_day_time_left, pull_forward[day]) time_forward = pull_forward[day] - extra_day_time if extra_day_time_left > timedelta(): for extra_day in extra_days_forward: - day_time = max(timedelta(), time_per_day - extra_days_to_work[extra_day]) + day_time = max(timedelta(), self.time_per_day - extra_days_to_work[extra_day]) extra_days_to_work[extra_day] += extra_day_time * (day_time / extra_day_time_left) - + hours_per_day_forward = time_forward / len(days_forward) if len(days_forward) > 0 else timedelta() days_forward.discard(end_date.date()) @@ -345,7 +346,7 @@ class Worktime(object): if end_date.date() in extra_days_to_work: self.time_pulled_forward += extra_days_to_work[end_date.date()] - + self.time_to_work += self.time_pulled_forward self.time_worked += api.get_billable_hours(start_date, self.now, rounding = config.getboolean('WORKTIME', 'rounding', fallback=True)) @@ -377,10 +378,10 @@ def worktime(**args): if total_minutes_difference >= 0: difference_string = difference_string(total_minutes_difference * timedelta(minutes = 1)) - return "{difference_string}/{clockout_time}".format(difference_string = difference_string, clockout_time = clockout_time.strftime("%H:%M")) + return f"{difference_string}/{clockout_time:%H:%M}" else: difference_string = difference_string(abs(total_minutes_difference) * timedelta(minutes = 1)) - return "{clockout_time}/{difference_string}".format(difference_string = difference_string, clockout_time = clockout_time.strftime("%H:%M")) + return f"{clockout_time:%H:%M}/{difference_string}" else: if worktime.running_entry: difference_string = difference_string(abs(total_minutes_difference) * timedelta(minutes = 1)) @@ -427,7 +428,20 @@ def time_worked(now, **args): if hours_difference == 0 or minutes_difference != 0: difference_string += f"{minutes_difference}m" - print(difference_string) + clockout_time = None + 
clockout_difference = None + if then.is_workday or now.is_workday: + target_time = max(then.time_per_day, now.time_per_day) if then.time_per_day and now.time_per_day else (then.time_per_day if then.time_per_day else now.time_per_day); + difference = target_time - worked + clockout_difference = 5 * ceil(difference / timedelta(minutes = 5)) + clockout_time = now.now + difference + clockout_time += (5 - clockout_time.minute % 5) * timedelta(minutes = 1) + clockout_time = clockout_time.replace(second = 0, microsecond = 0) + + if now.running_entry and clockout_time and clockout_difference >= 0: + print(f"{difference_string}/{clockout_time:%H:%M}") + else: + print(difference_string) else: print(worked) @@ -445,7 +459,7 @@ def holidays(year, **args): date_format = config.get('WORKTIME', 'DateFormat', fallback='%Y-%m-%d') table_data = [] - + holidays = Worktime.holidays(year) for k, v in holidays.items(): kstr = k.strftime(date_format) @@ -473,7 +487,7 @@ def leave(year, table, **args): break else: print(f'Unaccounted leave: {day}', file=stderr) - + if table: table_data = [] for year, days in leave_budget.items(): diff --git a/user-profiles/utils.nix b/user-profiles/utils.nix index d0d2b2c8..c5042d41 100644 --- a/user-profiles/utils.nix +++ b/user-profiles/utils.nix @@ -24,6 +24,7 @@ mosh tree vnstat file pv bc fast-cli zip nmap aspell aspellDicts.de aspellDicts.en borgbackup man-pages rsync socat inetutils yq cached-nix-shell persistent-nix-shell rage + smartmontools hdparm ]; }; } -- cgit v1.2.3
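For reference: the zfssnap module above renders services.zfssnap.config to INI
(generators.toINI with upper-cased section names), and zfssnap.py reads the
KEEP and EXEC sections back through configparser. Combining the keep settings
from hosts/vidhar/zfs.nix with the exec settings from
hosts/vidhar/borg/default.nix, the zfssnap.ini handed to
"zfssnap -vv prune --config=..." would look roughly like this (store path
abbreviated, key order illustrative):

    [KEEP]
    within = 15m
    5m = 48
    15m = 32
    hourly = 48
    4h = 24
    12h = 12
    daily = 62
    halfweekly = 32
    weekly = 24
    monthly = -1

    [EXEC]
    check = /nix/store/...-borgsnap/bin/borgsnap -vvv --target yggdrasil.borgbase:repo --archive-prefix yggdrasil.vidhar. check --cache-file /run/zfssnap-prune/archives-cache.json
    cmd = /nix/store/...-borgsnap/bin/borgsnap -vvv --target yggdrasil.borgbase:repo --archive-prefix yggdrasil.vidhar. create --dry-run
    halfweekly = 8
    monthly = -1

Per the prune code above, each run offers at most EXEC count snapshots
(fallback 1) that fail the check command to cmd, and any candidates not yet
processed are protected from destruction via the exec-candidate keep rule.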