summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGregor Kleen <gkleen@yggdrasil.li>2022-09-13 10:29:35 +0200
committerGregor Kleen <gkleen@yggdrasil.li>2022-09-13 10:29:35 +0200
commitb931543508377c0e48a6801e4ea217eb523e2b03 (patch)
tree373c8ab46c6e78cb69654d816fadf8d6fef1fd28
parent92dab2dbad09bee9698fc0a9734140af37ca550a (diff)
downloadnixos-b931543508377c0e48a6801e4ea217eb523e2b03.tar
nixos-b931543508377c0e48a6801e4ea217eb523e2b03.tar.gz
nixos-b931543508377c0e48a6801e4ea217eb523e2b03.tar.bz2
nixos-b931543508377c0e48a6801e4ea217eb523e2b03.tar.xz
nixos-b931543508377c0e48a6801e4ea217eb523e2b03.zip
...
-rw-r--r--_sources/generated.json58
-rw-r--r--_sources/generated.nix46
-rw-r--r--accounts/gkleen@sif/default.nix4
-rw-r--r--hosts/surtr/email/ccert-policy-server/ccert_policy_server/__init__.py0
-rw-r--r--hosts/surtr/email/ccert-policy-server/ccert_policy_server/__main__.py92
-rw-r--r--hosts/surtr/email/ccert-policy-server/setup.py12
-rw-r--r--hosts/surtr/email/default.nix88
-rw-r--r--hosts/surtr/postgresql.nix11
-rw-r--r--hosts/vidhar/borg/borgsnap/borgsnap/__main__.py202
-rw-r--r--hosts/vidhar/borg/borgsnap/setup.py10
-rwxr-xr-xhosts/vidhar/borg/copy.py4
-rw-r--r--hosts/vidhar/borg/default.nix92
-rw-r--r--hosts/vidhar/default.nix39
-rw-r--r--hosts/vidhar/prometheus/default.nix50
-rw-r--r--hosts/vidhar/zfs.nix16
-rw-r--r--modules/postfwd.nix65
-rw-r--r--modules/zfssnap/default.nix6
-rw-r--r--modules/zfssnap/zfssnap.py197
-rw-r--r--nvfetcher.toml12
-rw-r--r--overlays/postfwd.nix32
-rw-r--r--overlays/smartprom/default.nix19
-rw-r--r--overlays/smartprom/setup.py11
-rwxr-xr-xoverlays/worktime/worktime.py58
-rw-r--r--user-profiles/utils.nix1
24 files changed, 951 insertions, 174 deletions
diff --git a/_sources/generated.json b/_sources/generated.json
index 78285ff5..dcbde8b1 100644
--- a/_sources/generated.json
+++ b/_sources/generated.json
@@ -64,11 +64,11 @@
64 "pinned": false, 64 "pinned": false,
65 "src": { 65 "src": {
66 "name": null, 66 "name": null,
67 "sha256": "sha256-eOq2PYvLB6ueIjC8Rif/p7HJKW8AkbEjf1La9/HUaC8=", 67 "sha256": "sha256-9Gx7Cwb5UWE1NaSA0iun6FY/TwjT0/jjcAR98SLLFjc=",
68 "type": "url", 68 "type": "url",
69 "url": "https://github.com/wofr06/lesspipe/archive/refs/tags/v2.05.tar.gz" 69 "url": "https://github.com/wofr06/lesspipe/archive/refs/tags/v2.06.tar.gz"
70 }, 70 },
71 "version": "2.05" 71 "version": "2.06"
72 }, 72 },
73 "mpv-autosave": { 73 "mpv-autosave": {
74 "cargoLocks": null, 74 "cargoLocks": null,
@@ -101,11 +101,11 @@
101 "name": null, 101 "name": null,
102 "owner": "po5", 102 "owner": "po5",
103 "repo": "chapterskip", 103 "repo": "chapterskip",
104 "rev": "f4c5da3e7661212eb491cc1d85beafbf951e32f0", 104 "rev": "b26825316e3329882206ae78dc903ebc4613f039",
105 "sha256": "sha256-ZK64WdhXMubzfkKFVg7rX5dPc4IVHDwN0T1X9tXwsJI=", 105 "sha256": "sha256-OTrLQE3rYvPQamEX23D6HttNjx3vafWdTMxTiWpDy90=",
106 "type": "github" 106 "type": "github"
107 }, 107 },
108 "version": "f4c5da3e7661212eb491cc1d85beafbf951e32f0" 108 "version": "b26825316e3329882206ae78dc903ebc4613f039"
109 }, 109 },
110 "mpv-createchapter": { 110 "mpv-createchapter": {
111 "cargoLocks": null, 111 "cargoLocks": null,
@@ -139,11 +139,11 @@
139 "name": null, 139 "name": null,
140 "owner": "hoyon", 140 "owner": "hoyon",
141 "repo": "mpv-mpris", 141 "repo": "mpv-mpris",
142 "rev": "0.8.1", 142 "rev": "0.9",
143 "sha256": "sha256-ugEiQZA1vQCVwyv3ViM84Qz8lhRvy17vcxjayYevTAs=", 143 "sha256": "sha256-leW7oCWTnJuprVnJJ+iXd3nuB2VXl3fw8FmPxv7d6rA=",
144 "type": "github" 144 "type": "github"
145 }, 145 },
146 "version": "0.8.1" 146 "version": "0.9"
147 }, 147 },
148 "mpv-reload": { 148 "mpv-reload": {
149 "cargoLocks": null, 149 "cargoLocks": null,
@@ -172,11 +172,25 @@
172 "pinned": false, 172 "pinned": false,
173 "src": { 173 "src": {
174 "name": null, 174 "name": null,
175 "sha256": "sha256-snvUmKZVckDNt2nnFOEa4cbGLtm825UgvA3cBpoNGLw=", 175 "sha256": "sha256-3vB6krsP6G25bviG27QI+9NyJN2YKOOmM5KhKUclJPc=",
176 "type": "url", 176 "type": "url",
177 "url": "https://github.com/Snawoot/postfix-mta-sts-resolver/archive/refs/tags/v1.1.3.tar.gz" 177 "url": "https://github.com/Snawoot/postfix-mta-sts-resolver/archive/refs/tags/v1.1.4.tar.gz"
178 }, 178 },
179 "version": "1.1.3" 179 "version": "1.1.4"
180 },
181 "postfwd": {
182 "cargoLocks": null,
183 "extract": null,
184 "name": "postfwd",
185 "passthru": null,
186 "pinned": false,
187 "src": {
188 "name": null,
189 "sha256": "sha256-mMKXzeqg2PfXkvGL7qugOelm/I2fZnUidq6/ugXDHa0=",
190 "type": "url",
191 "url": "https://github.com/postfwd/postfwd/archive/refs/tags/v2.03.tar.gz"
192 },
193 "version": "2.03"
180 }, 194 },
181 "psql-versioning": { 195 "psql-versioning": {
182 "cargoLocks": null, 196 "cargoLocks": null,
@@ -196,6 +210,20 @@
196 }, 210 },
197 "version": "3e578ff5e5aa6c7e5459dbfa842a64a1b2674b2e" 211 "version": "3e578ff5e5aa6c7e5459dbfa842a64a1b2674b2e"
198 }, 212 },
213 "smartprom": {
214 "cargoLocks": null,
215 "extract": null,
216 "name": "smartprom",
217 "passthru": null,
218 "pinned": false,
219 "src": {
220 "name": null,
221 "sha256": "sha256-VbpFvDBygJswUfmufVjo/xXxDDmXLq/0D9ln8u+139E=",
222 "type": "url",
223 "url": "https://github.com/matusnovak/prometheus-smartctl/archive/refs/tags/v2.1.0.tar.gz"
224 },
225 "version": "2.1.0"
226 },
199 "uhk-agent": { 227 "uhk-agent": {
200 "cargoLocks": null, 228 "cargoLocks": null,
201 "extract": null, 229 "extract": null,
@@ -223,11 +251,11 @@
223 "name": null, 251 "name": null,
224 "owner": "umlaeute", 252 "owner": "umlaeute",
225 "repo": "v4l2loopback", 253 "repo": "v4l2loopback",
226 "rev": "4aadc417254bfa3b875bf0b69278ce400ce659b2", 254 "rev": "76434ab6f71d5ecbff8a218ff6bed91ea2bf73b8",
227 "sha256": "sha256-nHxIW5BmaZC6g7SElxboTcwtMDF4SCqi11MjYWsUZpo=", 255 "sha256": "sha256-c6g63jW+a+v/TxLD9NnQGn/aUgivwVkxzP+hZ65w2/o=",
228 "type": "github" 256 "type": "github"
229 }, 257 },
230 "version": "4aadc417254bfa3b875bf0b69278ce400ce659b2" 258 "version": "76434ab6f71d5ecbff8a218ff6bed91ea2bf73b8"
231 }, 259 },
232 "xcompose": { 260 "xcompose": {
233 "cargoLocks": null, 261 "cargoLocks": null,
diff --git a/_sources/generated.nix b/_sources/generated.nix
index 8aecd856..a77cb5d8 100644
--- a/_sources/generated.nix
+++ b/_sources/generated.nix
@@ -38,10 +38,10 @@
38 }; 38 };
39 lesspipe = { 39 lesspipe = {
40 pname = "lesspipe"; 40 pname = "lesspipe";
41 version = "2.05"; 41 version = "2.06";
42 src = fetchurl { 42 src = fetchurl {
43 url = "https://github.com/wofr06/lesspipe/archive/refs/tags/v2.05.tar.gz"; 43 url = "https://github.com/wofr06/lesspipe/archive/refs/tags/v2.06.tar.gz";
44 sha256 = "sha256-eOq2PYvLB6ueIjC8Rif/p7HJKW8AkbEjf1La9/HUaC8="; 44 sha256 = "sha256-9Gx7Cwb5UWE1NaSA0iun6FY/TwjT0/jjcAR98SLLFjc=";
45 }; 45 };
46 }; 46 };
47 mpv-autosave = { 47 mpv-autosave = {
@@ -58,13 +58,13 @@
58 }; 58 };
59 mpv-chapterskip = { 59 mpv-chapterskip = {
60 pname = "mpv-chapterskip"; 60 pname = "mpv-chapterskip";
61 version = "f4c5da3e7661212eb491cc1d85beafbf951e32f0"; 61 version = "b26825316e3329882206ae78dc903ebc4613f039";
62 src = fetchFromGitHub ({ 62 src = fetchFromGitHub ({
63 owner = "po5"; 63 owner = "po5";
64 repo = "chapterskip"; 64 repo = "chapterskip";
65 rev = "f4c5da3e7661212eb491cc1d85beafbf951e32f0"; 65 rev = "b26825316e3329882206ae78dc903ebc4613f039";
66 fetchSubmodules = false; 66 fetchSubmodules = false;
67 sha256 = "sha256-ZK64WdhXMubzfkKFVg7rX5dPc4IVHDwN0T1X9tXwsJI="; 67 sha256 = "sha256-OTrLQE3rYvPQamEX23D6HttNjx3vafWdTMxTiWpDy90=";
68 }); 68 });
69 }; 69 };
70 mpv-createchapter = { 70 mpv-createchapter = {
@@ -80,13 +80,13 @@
80 }; 80 };
81 mpv-mpris = { 81 mpv-mpris = {
82 pname = "mpv-mpris"; 82 pname = "mpv-mpris";
83 version = "0.8.1"; 83 version = "0.9";
84 src = fetchFromGitHub ({ 84 src = fetchFromGitHub ({
85 owner = "hoyon"; 85 owner = "hoyon";
86 repo = "mpv-mpris"; 86 repo = "mpv-mpris";
87 rev = "0.8.1"; 87 rev = "0.9";
88 fetchSubmodules = false; 88 fetchSubmodules = false;
89 sha256 = "sha256-ugEiQZA1vQCVwyv3ViM84Qz8lhRvy17vcxjayYevTAs="; 89 sha256 = "sha256-leW7oCWTnJuprVnJJ+iXd3nuB2VXl3fw8FmPxv7d6rA=";
90 }); 90 });
91 }; 91 };
92 mpv-reload = { 92 mpv-reload = {
@@ -102,10 +102,18 @@
102 }; 102 };
103 postfix-mta-sts-resolver = { 103 postfix-mta-sts-resolver = {
104 pname = "postfix-mta-sts-resolver"; 104 pname = "postfix-mta-sts-resolver";
105 version = "1.1.3"; 105 version = "1.1.4";
106 src = fetchurl { 106 src = fetchurl {
107 url = "https://github.com/Snawoot/postfix-mta-sts-resolver/archive/refs/tags/v1.1.3.tar.gz"; 107 url = "https://github.com/Snawoot/postfix-mta-sts-resolver/archive/refs/tags/v1.1.4.tar.gz";
108 sha256 = "sha256-snvUmKZVckDNt2nnFOEa4cbGLtm825UgvA3cBpoNGLw="; 108 sha256 = "sha256-3vB6krsP6G25bviG27QI+9NyJN2YKOOmM5KhKUclJPc=";
109 };
110 };
111 postfwd = {
112 pname = "postfwd";
113 version = "2.03";
114 src = fetchurl {
115 url = "https://github.com/postfwd/postfwd/archive/refs/tags/v2.03.tar.gz";
116 sha256 = "sha256-mMKXzeqg2PfXkvGL7qugOelm/I2fZnUidq6/ugXDHa0=";
109 }; 117 };
110 }; 118 };
111 psql-versioning = { 119 psql-versioning = {
@@ -120,6 +128,14 @@
120 sha256 = "sha256-j+njRssJHTdNV3FbcA3MdUmzCaJxuYBrC0qwtK3HoyY="; 128 sha256 = "sha256-j+njRssJHTdNV3FbcA3MdUmzCaJxuYBrC0qwtK3HoyY=";
121 }; 129 };
122 }; 130 };
131 smartprom = {
132 pname = "smartprom";
133 version = "2.1.0";
134 src = fetchurl {
135 url = "https://github.com/matusnovak/prometheus-smartctl/archive/refs/tags/v2.1.0.tar.gz";
136 sha256 = "sha256-VbpFvDBygJswUfmufVjo/xXxDDmXLq/0D9ln8u+139E=";
137 };
138 };
123 uhk-agent = { 139 uhk-agent = {
124 pname = "uhk-agent"; 140 pname = "uhk-agent";
125 version = "1.5.17"; 141 version = "1.5.17";
@@ -130,13 +146,13 @@
130 }; 146 };
131 v4l2loopback = { 147 v4l2loopback = {
132 pname = "v4l2loopback"; 148 pname = "v4l2loopback";
133 version = "4aadc417254bfa3b875bf0b69278ce400ce659b2"; 149 version = "76434ab6f71d5ecbff8a218ff6bed91ea2bf73b8";
134 src = fetchFromGitHub ({ 150 src = fetchFromGitHub ({
135 owner = "umlaeute"; 151 owner = "umlaeute";
136 repo = "v4l2loopback"; 152 repo = "v4l2loopback";
137 rev = "4aadc417254bfa3b875bf0b69278ce400ce659b2"; 153 rev = "76434ab6f71d5ecbff8a218ff6bed91ea2bf73b8";
138 fetchSubmodules = true; 154 fetchSubmodules = true;
139 sha256 = "sha256-nHxIW5BmaZC6g7SElxboTcwtMDF4SCqi11MjYWsUZpo="; 155 sha256 = "sha256-c6g63jW+a+v/TxLD9NnQGn/aUgivwVkxzP+hZ65w2/o=";
140 }); 156 });
141 }; 157 };
142 xcompose = { 158 xcompose = {
diff --git a/accounts/gkleen@sif/default.nix b/accounts/gkleen@sif/default.nix
index d3db91c8..2cfaa620 100644
--- a/accounts/gkleen@sif/default.nix
+++ b/accounts/gkleen@sif/default.nix
@@ -258,12 +258,14 @@ in {
258 screen-locker = { 258 screen-locker = {
259 enable = true; 259 enable = true;
260 lockCmd = toString (pkgs.writeShellScript "lock" '' 260 lockCmd = toString (pkgs.writeShellScript "lock" ''
261 ${pkgs.playerctl}/bin/playerctl -a status | ${pkgs.gnugrep}/bin/grep -q "Playing" && exit 0
262
261 cleanup() { 263 cleanup() {
262 ${cfg.services.dunst.package}/bin/dunstctl set-paused false 264 ${cfg.services.dunst.package}/bin/dunstctl set-paused false
263 } 265 }
264 trap cleanup EXIT INT TERM 266 trap cleanup EXIT INT TERM
265 267
266 ${pkgs.playerctl}/bin/playerctl -a pause 268 # ${pkgs.playerctl}/bin/playerctl -a pause
267 ${cfg.services.dunst.package}/bin/dunstctl set-paused true 269 ${cfg.services.dunst.package}/bin/dunstctl set-paused true
268 ${pkgs.xsecurelock}/bin/xsecurelock 270 ${pkgs.xsecurelock}/bin/xsecurelock
269 ''); 271 '');
diff --git a/hosts/surtr/email/ccert-policy-server/ccert_policy_server/__init__.py b/hosts/surtr/email/ccert-policy-server/ccert_policy_server/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/hosts/surtr/email/ccert-policy-server/ccert_policy_server/__init__.py
diff --git a/hosts/surtr/email/ccert-policy-server/ccert_policy_server/__main__.py b/hosts/surtr/email/ccert-policy-server/ccert_policy_server/__main__.py
new file mode 100644
index 00000000..f481090c
--- /dev/null
+++ b/hosts/surtr/email/ccert-policy-server/ccert_policy_server/__main__.py
@@ -0,0 +1,92 @@
1from systemd.daemon import listen_fds
2from sdnotify import SystemdNotifier
3from socketserver import StreamRequestHandler, ThreadingMixIn
4from systemd_socketserver import SystemdSocketServer
5import sys
6from threading import Thread
7from psycopg_pool import ConnectionPool
8from psycopg.rows import namedtuple_row
9
10import logging
11
12
class PolicyHandler(StreamRequestHandler):
    """Postfix policy-delegation handler.

    Reads one request in the Postfix policy protocol (``key=value`` lines
    terminated by an empty line), checks whether the TLS client-certificate
    subject (``ccert_subject``) is authorized to use the claimed sender
    address, and replies with a single ``action=...`` line.
    """

    def handle(self):
        logger.debug('Handling new connection...')

        # Attributes sent by Postfix for this delivery attempt.
        self.args = dict()

        line = None
        while line := self.rfile.readline().removesuffix(b'\n'):
            # A line without '=' is malformed; stop reading the request early.
            if b'=' not in line:
                break

            key, val = line.split(sep=b'=', maxsplit=1)
            self.args[key.decode()] = val.decode()

        logger.info('Connection parameters: %s', self.args)

        allowed = False
        with self.server.db_pool.connection() as conn:
            # Split sender into local part, optional '+extension', and domain.
            local, domain = self.args['sender'].split(sep='@', maxsplit=1)
            extension = None
            if '+' in local:
                local, extension = local.split(sep='+', maxsplit=1)

            logger.debug('Parsed address: %s', {'local': local, 'extension': extension, 'domain': domain})

            with conn.cursor() as cur:
                cur.row_factory = namedtuple_row
                # Any matching row means the mailbox mapping permits this
                # sender; NULL "local"/"extension" columns act as wildcards.
                cur.execute('SELECT "mailbox"."mailbox" as "user", "local", "extension", "domain" FROM "mailbox" INNER JOIN "mailbox_mapping" ON "mailbox".id = "mailbox_mapping"."mailbox" WHERE "mailbox"."mailbox" = %(user)s AND ("local" = %(local)s OR "local" IS NULL) AND ("extension" = %(extension)s OR "extension" IS NULL) AND "domain" = %(domain)s', params = {'user': self.args['ccert_subject'], 'local': local, 'extension': extension if extension is not None else '', 'domain': domain}, prepare=True)
                for record in cur:
                    logger.debug('Received result: %s', record)
                    allowed = True

        # Default to rejection; 'DUNNO' tells Postfix "no objection, continue".
        action = '550 5.7.0 Sender address not authorized for current user'
        if allowed:
            action = 'DUNNO'

        logger.info('Reached verdict: %s', {'allowed': allowed, 'action': action})
        self.wfile.write(f'action={action}\n\n'.encode())
51
class ThreadedSystemdSocketServer(ThreadingMixIn, SystemdSocketServer):
    """Socket server on a systemd-passed fd that threads each request and
    owns the shared psycopg connection pool used by PolicyHandler
    (reached as ``self.server.db_pool`` from the handler)."""

    def __init__(self, fd, RequestHandlerClass):
        super().__init__(fd, RequestHandlerClass)

        # Connection parameters come from the environment (PG* variables,
        # e.g. PGDATABASE set by the service unit); wait() blocks until the
        # pool has established its minimum number of connections.
        self.db_pool = ConnectionPool(min_size=1)
        self.db_pool.wait()
58
def main():
    """Set up logging, then serve policy requests on every systemd-passed socket.

    Returns 0 once all servers are running (and notifies systemd readiness),
    or 2 when no sockets were passed in.
    """
    global logger
    logger = logging.getLogger(__name__)

    # Include timestamps only on a TTY; otherwise keep the terse format.
    fmt = '[%(levelname)s](%(name)s): %(message)s'
    if sys.stderr.isatty():
        fmt = '%(asctime)s [%(levelname)s](%(name)s): %(message)s'
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(console_handler)
    logger.setLevel(logging.DEBUG)

    def log_exceptions(type, value, tb):
        # Log uncaught exceptions before deferring to the default hook.
        global logger
        logger.error(value)
        sys.__excepthook__(type, value, tb)

    sys.excepthook = log_exceptions

    servers = [ThreadedSystemdSocketServer(fd, PolicyHandler) for fd in listen_fds()]
    if not servers:
        return 2

    for server in servers:
        Thread(name=f'Server for fd{server.fileno()}', target=server.serve_forever).start()

    SystemdNotifier().notify('READY=1')
    return 0
90
91if __name__ == '__main__':
92 sys.exit(main())
diff --git a/hosts/surtr/email/ccert-policy-server/setup.py b/hosts/surtr/email/ccert-policy-server/setup.py
new file mode 100644
index 00000000..d8eb415a
--- /dev/null
+++ b/hosts/surtr/email/ccert-policy-server/setup.py
@@ -0,0 +1,12 @@
"""Packaging metadata for the ccert sender-policy daemon."""
from setuptools import setup, find_packages

# Single console script pointing at the package's __main__ entry point.
ENTRY_POINTS = {
    'console_scripts': [
        'ccert-policy-server=ccert_policy_server.__main__:main'
    ],
}

setup(
    name='ccert-policy-server',
    version='0.0.0',
    packages=['ccert_policy_server'],
    entry_points=ENTRY_POINTS,
)
diff --git a/hosts/surtr/email/default.nix b/hosts/surtr/email/default.nix
index 83bf02f5..9cfba1f1 100644
--- a/hosts/surtr/email/default.nix
+++ b/hosts/surtr/email/default.nix
@@ -1,4 +1,4 @@
1{ config, pkgs, lib, ... }: 1{ config, pkgs, lib, flakeInputs, ... }:
2 2
3with lib; 3with lib;
4 4
@@ -20,6 +20,27 @@ let
20 ''; 20 '';
21 }; 21 };
22 22
23 ccert-policy-server = flakeInputs.mach-nix.lib.${config.nixpkgs.system}.buildPythonPackage {
24 src = ./ccert-policy-server;
25 pname = "ccert-policy-server";
26 version = "0.0.0";
27
28 python = "python39";
29 ignoreDataOutdated = true;
30
31 requirements = ''
32 sdnotify
33 systemd-socketserver
34 psycopg >=3.0.0
35 psycopg-pool >=3.0.0
36 psycopg-binary >=3.0.0
37 '';
38
39 overridesPre = [
40 (self: super: { systemd-python = super.systemd.overrideAttrs (oldAttrs: { pname = "systemd-python"; }); })
41 ];
42 };
43
23 spmDomains = ["bouncy.email"]; 44 spmDomains = ["bouncy.email"];
24in { 45in {
25 config = { 46 config = {
@@ -35,7 +56,7 @@ in {
35 }; 56 };
36 }) 57 })
37 ]; 58 ];
38 59
39 services.postfix = { 60 services.postfix = {
40 enable = true; 61 enable = true;
41 hostname = "surtr.yggdrasil.li"; 62 hostname = "surtr.yggdrasil.li";
@@ -187,8 +208,9 @@ in {
187 "-o" "smtpd_tls_ask_ccert=yes" 208 "-o" "smtpd_tls_ask_ccert=yes"
188 "-o" "smtpd_tls_req_ccert=yes" 209 "-o" "smtpd_tls_req_ccert=yes"
189 "-o" "smtpd_client_restrictions=permit_tls_all_clientcerts,reject" 210 "-o" "smtpd_client_restrictions=permit_tls_all_clientcerts,reject"
211 "-o" "{smtpd_data_restrictions = check_policy_service unix:/run/postfwd3/postfwd3.sock}"
190 "-o" "smtpd_relay_restrictions=permit_tls_all_clientcerts,reject" 212 "-o" "smtpd_relay_restrictions=permit_tls_all_clientcerts,reject"
191 "-o" "smtpd_sender_restrictions=reject_unknown_sender_domain,reject_unverified_sender" 213 "-o" "{smtpd_sender_restrictions = reject_unknown_sender_domain,reject_unverified_sender,check_policy_service unix:/run/postfix-ccert-sender-policy.sock}"
192 "-o" "unverified_sender_reject_code=550" 214 "-o" "unverified_sender_reject_code=550"
193 "-o" "unverified_sender_reject_reason={Sender address rejected: undeliverable address}" 215 "-o" "unverified_sender_reject_reason={Sender address rejected: undeliverable address}"
194 "-o" "smtpd_recipient_restrictions=reject_unauth_pipelining,reject_non_fqdn_recipient,reject_unknown_recipient_domain,permit_tls_all_clientcerts,reject" 216 "-o" "smtpd_recipient_restrictions=reject_unauth_pipelining,reject_non_fqdn_recipient,reject_unknown_recipient_domain,permit_tls_all_clientcerts,reject"
@@ -415,7 +437,7 @@ in {
415 mail_plugins = $mail_plugins quota 437 mail_plugins = $mail_plugins quota
416 mailbox_list_index = yes 438 mailbox_list_index = yes
417 postmaster_address = postmaster@yggdrasil.li 439 postmaster_address = postmaster@yggdrasil.li
418 recipient_delimiter = 440 recipient_delimiter =
419 auth_username_chars = abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890.-+_@ 441 auth_username_chars = abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890.-+_@
420 442
421 service lmtp { 443 service lmtp {
@@ -431,7 +453,7 @@ in {
431 namespace inbox { 453 namespace inbox {
432 separator = / 454 separator = /
433 inbox = yes 455 inbox = yes
434 prefix = 456 prefix =
435 457
436 mailbox Trash { 458 mailbox Trash {
437 auto = no 459 auto = no
@@ -602,7 +624,7 @@ in {
602 ${pkgs.dovecot_pigeonhole}/bin/sievec $f 624 ${pkgs.dovecot_pigeonhole}/bin/sievec $f
603 done 625 done
604 ''; 626 '';
605 627
606 serviceConfig = { 628 serviceConfig = {
607 LoadCredential = [ 629 LoadCredential = [
608 "surtr.yggdrasil.li.key.pem:${config.security.acme.certs."surtr.yggdrasil.li".directory}/key.pem" 630 "surtr.yggdrasil.li.key.pem:${config.security.acme.certs."surtr.yggdrasil.li".directory}/key.pem"
@@ -703,7 +725,7 @@ in {
703 }; 725 };
704 systemd.sockets.spm = { 726 systemd.sockets.spm = {
705 wantedBy = [ "nginx.service" ]; 727 wantedBy = [ "nginx.service" ];
706 728
707 socketConfig = { 729 socketConfig = {
708 ListenStream = "/run/spm/server.sock"; 730 ListenStream = "/run/spm/server.sock";
709 SocketUser = "spm"; 731 SocketUser = "spm";
@@ -730,5 +752,57 @@ in {
730 enable = true; 752 enable = true;
731 loglevel = "debug"; 753 loglevel = "debug";
732 }; 754 };
755
756 systemd.sockets."postfix-ccert-sender-policy" = {
757 requiredBy = ["postfix.service"];
758 wants = ["postfix-ccert-sender-policy.service"];
759 socketConfig = {
760 ListenStream = "/run/postfix-ccert-sender-policy.sock";
761 };
762 };
763 systemd.services."postfix-ccert-sender-policy" = {
764 serviceConfig = {
765 Type = "notify";
766
767 ExecStart = "${ccert-policy-server}/bin/ccert-policy-server";
768
769 Environment = [
770 "PGDATABASE=email"
771 ];
772
773 DynamicUser = false;
774 User = "postfix-ccert-sender-policy";
775 Group = "postfix-ccert-sender-policy";
776 ProtectSystem = "strict";
777 SystemCallFilter = "@system-service";
778 NoNewPrivileges = true;
779 ProtectKernelTunables = true;
780 ProtectKernelModules = true;
781 ProtectKernelLogs = true;
782 ProtectControlGroups = true;
783 MemoryDenyWriteExecute = true;
784 RestrictSUIDSGID = true;
785 KeyringMode = "private";
786 ProtectClock = true;
787 RestrictRealtime = true;
788 PrivateDevices = true;
789 PrivateTmp = true;
790 ProtectHostname = true;
791 ReadWritePaths = ["/run/postgresql"];
792 };
793 };
794 users.users."postfix-ccert-sender-policy" = {
795 isSystemUser = true;
796 group = "postfix-ccert-sender-policy";
797 };
798 users.groups."postfix-ccert-sender-policy" = {};
799
800 services.postfwd = {
801 enable = true;
802 rules = ''
803 id=RCPT01; protocol_state=DATA; protocol_state=END-OF-MESSAGE; action=rcpt(ccert_subject/100/3600/450 4.7.1 Exceeding maximum of 100 recipients per hour [$$ratecount])
804 id=RCPT02; protocol_state=DATA; protocol_state=END-OF-MESSAGE; action=rcpt(ccert_subject/1000/86400/450 4.7.1 Exceeding maximum of 1000 recipients per day [$$ratecount])
805 '';
806 };
733 }; 807 };
734} 808}
diff --git a/hosts/surtr/postgresql.nix b/hosts/surtr/postgresql.nix
index 66ce60eb..7013ae97 100644
--- a/hosts/surtr/postgresql.nix
+++ b/hosts/surtr/postgresql.nix
@@ -104,7 +104,7 @@ in {
104 ALTER TABLE mailbox_mapping ALTER local TYPE citext; 104 ALTER TABLE mailbox_mapping ALTER local TYPE citext;
105 ALTER TABLE mailbox_mapping ALTER domain TYPE citext; 105 ALTER TABLE mailbox_mapping ALTER domain TYPE citext;
106 106
107 CREATE VIEW mailbox_quota_rule (id, mailbox, quota_rule) AS SELECT id, mailbox, (CASE WHEN quota_bytes IS NULL THEN '*:ignore' ELSE '*:bytes=' || quota_bytes END) AS quota_rule FROM mailbox; 107 CREATE VIEW mailbox_quota_rule (id, mailbox, quota_rule) AS SELECT id, mailbox, (CASE WHEN quota_bytes IS NULL THEN '*:ignore' ELSE '*:bytes=' || quota_bytes END) AS quota_rule FROM mailbox;
108 108
109 CREATE VIEW virtual_mailbox_domain (domain) AS SELECT DISTINCT domain FROM mailbox_mapping; 109 CREATE VIEW virtual_mailbox_domain (domain) AS SELECT DISTINCT domain FROM mailbox_mapping;
110 CREATE VIEW virtual_mailbox_mapping (lookup) AS SELECT (CASE WHEN local IS NULL THEN ''' ELSE local END) || '@' || domain AS lookup FROM mailbox_mapping; 110 CREATE VIEW virtual_mailbox_mapping (lookup) AS SELECT (CASE WHEN local IS NULL THEN ''' ELSE local END) || '@' || domain AS lookup FROM mailbox_mapping;
@@ -143,6 +143,15 @@ in {
143 143
144 GRANT SELECT ON ALL TABLES IN SCHEMA public TO "spm"; 144 GRANT SELECT ON ALL TABLES IN SCHEMA public TO "spm";
145 COMMIT; 145 COMMIT;
146
147 BEGIN;
148 SELECT _v.register_patch('007-ccert-sender-policy', ARRAY['000-base'], null);
149
150 CREATE USER "postfix-ccert-sender-policy";
151 GRANT CONNECT ON DATABASE "email" TO "postfix-ccert-sender-policy";
152 ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO "postfix-ccert-sender-policy";
153 GRANT SELECT ON ALL TABLES IN SCHEMA public TO "postfix-ccert-sender-policy";
154 COMMIT;
146 ''} 155 ''}
147 ''; 156 '';
148 }; 157 };
diff --git a/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py b/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py
new file mode 100644
index 00000000..e93e6a60
--- /dev/null
+++ b/hosts/vidhar/borg/borgsnap/borgsnap/__main__.py
@@ -0,0 +1,202 @@
1import argparse
2import os, sys, signal
3from pyprctl import cap_permitted, cap_inheritable, cap_effective, cap_ambient, Cap
4from pwd import getpwnam
5
6from datetime import datetime, timezone
7from dateutil.parser import isoparse
8
9from xdg import xdg_runtime_dir
10import unshare
11from tempfile import TemporaryDirectory
12
13import logging
14
15import json
16import subprocess
17
18import pathlib
19from pathlib import Path
20
21from atomicwrites import atomic_write
22
23from traceback import format_exc
24
25
# Resolve the 'borg' system account once at import time; as_borg() drops to it.
borg_pwd = getpwnam('borg')

def as_borg(caps=set(), cwd=None):
    """preexec_fn helper: drop from root to the 'borg' user in a child process.

    caps: set of pyprctl Cap values to keep across the uid change; they are
          raised in the permitted/inheritable/effective/ambient sets first so
          they survive setuid.
    cwd:  optional directory to chdir to after dropping privileges.

    NOTE(review): the mutable default ``caps=set()`` is never mutated here,
    but a None sentinel would be the safer idiom.
    """
    if caps:
        cap_permitted.add(*caps)
        cap_inheritable.add(*caps)
        cap_effective.add(*caps)
        cap_ambient.add(*caps)

    # setgid must happen before setuid: once uid is dropped we may no longer
    # be permitted to change the gid.
    os.setgid(borg_pwd.pw_gid)
    os.setuid(borg_pwd.pw_uid)

    if cwd is not None:
        os.chdir(cwd)
40
41
def _archive_name(snapshot, target, archive_prefix):
    """Build the full ``repo::archive-timestamp`` borg location for a snapshot.

    The timestamp suffix is the snapshot's creation time (taken from the part
    after '@') normalized to UTC.
    """
    timestamp = snapshot.rpartition('@')[2]
    utc_time = isoparse(timestamp).astimezone(timezone.utc)
    suffix = utc_time.strftime("%Y-%m-%dT%H:%M:%S")
    return f'{target}::{_archive_basename(snapshot, archive_prefix)}-{suffix}'
47
48def _archive_basename(snapshot, archive_prefix):
49 base_name, _, _ = snapshot.rpartition('@')
50 return archive_prefix + base_name.replace('-', '--').replace('/', '-')
51
def check(*, snapshot, target, archive_prefix, cache_file):
    """Return 0 when an archive for ``snapshot`` already exists in ``target``,
    126 otherwise.

    The remote archive list is optionally cached in ``cache_file`` (a JSON
    list of archive names): the cache is used when the file exists, and is
    (re)written atomically after a remote listing.
    """
    archives = None
    if cache_file:
        logger.debug('Trying cache...')
        try:
            with open(cache_file, mode='r', encoding='utf-8') as fp:
                archives = set(json.load(fp))
            logger.info('Loaded archive list from cache')
        except FileNotFoundError:
            # No cache yet — fall through to the remote listing.
            pass

    if not archives:
        logger.info('Loading archive list from remote...')
        # Run `borg list` as the borg user (as_borg drops privileges in the child).
        with subprocess.Popen(['borg', 'list', '--info', '--lock-wait=600', '--json', target], stdout=subprocess.PIPE, preexec_fn=lambda: as_borg()) as proc:
            archives = set([archive['barchive'] for archive in json.load(proc.stdout)['archives']])
        if cache_file:
            logger.debug('Saving archive list to cache...')
            with atomic_write(cache_file, mode='w', encoding='utf-8', overwrite=True) as fp:
                json.dump(list(archives), fp)

    # Compare only the archive part (after '::') of the expected name.
    _, _, archive_name = _archive_name(snapshot, target, archive_prefix).partition('::')
    if archive_name in archives:
        logger.info(f'{archive_name} found')
        return 0
    else:
        logger.info(f'{archive_name} not found')
        # 126 signals "not present" to the caller without being a hard error.
        return 126
80
def create(*, snapshot, target, archive_prefix, dry_run):
    """Archive a ZFS snapshot into the borg repository ``target``.

    Forks a child which:
      * unshares its mount namespace (all mounts vanish when it exits),
      * builds an overlayfs chroot of / so borg's usual paths (cache,
        secrets, ssh agent socket, home) stay reachable,
      * mounts ``snapshot`` read-only on /borg inside the chroot,
      * runs ``borg create`` there as the borg user.

    The parent reaps children and exits with the child's wait status when it
    is non-zero.  Returns 0 on success.
    """
    basename = _archive_basename(snapshot, archive_prefix)

    with TemporaryDirectory(prefix=f'borg-mount_{basename}_', dir=os.environ.get('RUNTIME_DIRECTORY')) as tmpdir:
        child = os.fork()
        if child == 0:
            # --- child process ---
            unshare.unshare(unshare.CLONE_NEWNS)
            subprocess.run(['mount', '--make-rprivate', '/'], check=True)
            chroot = pathlib.Path(tmpdir) / 'chroot'
            upper = pathlib.Path(tmpdir) / 'upper'
            work = pathlib.Path(tmpdir) / 'work'
            for path in [chroot, upper, work]:
                path.mkdir()
            subprocess.run(['mount', '-t', 'overlay', 'overlay', '-o', f'lowerdir=/,upperdir={upper},workdir={work}', chroot], check=True)
            # Paths that must remain visible inside the chroot for borg/ssh.
            bindMounts = ['nix', 'run', 'run/secrets.d', 'run/wrappers', 'proc', 'dev', 'sys', pathlib.Path(os.path.expanduser('~')).relative_to('/')]
            if os.environ.get('BORG_BASE_DIR'):
                bindMounts.append(pathlib.Path(os.environ['BORG_BASE_DIR']).relative_to('/'))
            if 'SSH_AUTH_SOCK' in os.environ:
                bindMounts.append(pathlib.Path(os.environ['SSH_AUTH_SOCK']).parent.relative_to('/'))
            for bindMount in bindMounts:
                (chroot / bindMount).mkdir(parents=True, exist_ok=True)
                subprocess.run(['mount', '--bind', pathlib.Path('/') / bindMount, chroot / bindMount], check=True)
            os.chroot(chroot)
            os.chdir('/')
            dir = pathlib.Path('/borg')
            dir.mkdir(parents=True, exist_ok=True, mode=0o0750)
            os.chown(dir, borg_pwd.pw_uid, borg_pwd.pw_gid)
            try:
                subprocess.run(['mount', '-t', 'zfs', '-o', 'ro', snapshot, dir], check=True)
                env = os.environ.copy()
                create_args = ['borg',
                               'create',
                               '--lock-wait=600',
                               '--one-file-system',
                               '--compression=auto,zstd,10',
                               '--chunker-params=10,23,16,4095',
                               '--files-cache=ctime,size',
                               '--show-rc',
                               # '--remote-ratelimit=20480',
                               '--progress',
                               '--list',
                               '--filter=AMEi-x?',
                               '--stats' if not dry_run else '--dry-run'
                               ]
                _, _, ts = snapshot.rpartition('@')
                creation_time = isoparse(ts).astimezone(timezone.utc)
                # Stamp the archive with the snapshot's creation time, not "now".
                create_args += [f'--timestamp={creation_time.strftime("%Y-%m-%dT%H:%M:%S")}']
                # Separate files cache per dataset so datasets don't evict each other.
                env['BORG_FILES_CACHE_SUFFIX'] = basename
                create_args += [_archive_name(snapshot, target, archive_prefix), '.']
                print({'create_args': create_args, 'cwd': dir, 'env': env}, file=sys.stderr)
                # BUGFIX: the capability enum is imported as `Cap` (pyprctl);
                # the previous `CAP.DAC_READ_SEARCH` raised NameError in the
                # preexec hook of the borg child.
                subprocess.run(create_args, stdin=subprocess.DEVNULL, env=env, preexec_fn=lambda: as_borg(caps={Cap.DAC_READ_SEARCH}, cwd=dir), check=True)
            finally:
                subprocess.run(['umount', dir], check=True)
            os._exit(0)
        else:
            # --- parent: reap children until ours has exited ---
            while True:
                waitpid, waitret = os.wait()
                # NOTE(review): waitret is a raw wait status, not an exit code;
                # os.waitstatus_to_exitcode() would be more precise — kept as-is.
                if waitret != 0:
                    sys.exit(waitret)
                if waitpid == child:
                    break
    return 0
145
def sigterm(signum, frame):
    """Signal handler: exit with the conventional 128+signum status code."""
    code = 128 + signum
    raise SystemExit(code)
148
def main():
    """CLI entry point: parse arguments and dispatch to check() or create()."""
    # Translate SIGTERM into SystemExit so `finally` blocks and context
    # managers get a chance to clean up.
    signal.signal(signal.SIGTERM, sigterm)

    global logger
    logger = logging.getLogger(__name__)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter( logging.Formatter('[%(levelname)s](%(name)s): %(message)s') )
    # Add timestamps only on a TTY (presumably the journal supplies its own
    # timestamps otherwise — NOTE(review): assumption, confirm).
    if sys.stderr.isatty():
        console_handler.setFormatter( logging.Formatter('%(asctime)s [%(levelname)s](%(name)s): %(message)s') )
    logger.addHandler(console_handler)

    # log uncaught exceptions
    def log_exceptions(type, value, tb):
        global logger

        logger.error(value)
        sys.__excepthook__(type, value, tb) # calls default excepthook

    sys.excepthook = log_exceptions

    parser = argparse.ArgumentParser(prog='borgsnap')
    parser.add_argument('--verbose', '-v', action='count', default=0)
    parser.add_argument('--target', metavar='REPO', default='yggdrasil.borgbase:repo')
    parser.add_argument('--archive-prefix', metavar='REPO', default='yggdrasil.vidhar.')
    subparsers = parser.add_subparsers()
    subparsers.required = True
    parser.set_defaults(cmd=None)
    # `check` looks an archive up (optionally via a cache file) …
    check_parser = subparsers.add_parser('check')
    check_parser.add_argument('--cache-file', type=lambda p: Path(p).absolute(), default=None)
    check_parser.add_argument('snapshot')
    check_parser.set_defaults(cmd=check)
    # … and `create` actually archives a snapshot.
    create_parser = subparsers.add_parser('create')
    create_parser.add_argument('--dry-run', '-n', action='store_true', default=False)
    create_parser.add_argument('snapshot')
    create_parser.set_defaults(cmd=create)
    args = parser.parse_args()

    # -v raises verbosity: WARNING (default) -> INFO -> DEBUG.
    if args.verbose <= 0:
        logger.setLevel(logging.WARNING)
    elif args.verbose <= 1:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.DEBUG)

    # Forward only the argparse attributes the selected subcommand accepts
    # (check and create have different keyword-only signatures).
    cmdArgs = {}
    for copy in {'target', 'archive_prefix', 'snapshot', 'cache_file', 'dry_run'}:
        if copy in vars(args):
            cmdArgs[copy] = vars(args)[copy]

    return args.cmd(**cmdArgs)
199
200
201if __name__ == '__main__':
202 sys.exit(main())
diff --git a/hosts/vidhar/borg/borgsnap/setup.py b/hosts/vidhar/borg/borgsnap/setup.py
new file mode 100644
index 00000000..76356bfc
--- /dev/null
+++ b/hosts/vidhar/borg/borgsnap/setup.py
@@ -0,0 +1,10 @@
"""Packaging metadata for the borgsnap helper."""
from setuptools import setup

setup(
    name='borgsnap',
    packages=['borgsnap'],
    # Console script pointing at the package's __main__ entry point.
    entry_points={
        'console_scripts': [
            'borgsnap=borgsnap.__main__:main',
        ],
    },
)
diff --git a/hosts/vidhar/borg/copy.py b/hosts/vidhar/borg/copy.py
index 4e9599b8..b9b667f2 100755
--- a/hosts/vidhar/borg/copy.py
+++ b/hosts/vidhar/borg/copy.py
@@ -71,7 +71,7 @@ def read_repo(path):
71 71
72class ToSync: 72class ToSync:
73 to_sync = deque() 73 to_sync = deque()
74 74
75 def __iter__(self): 75 def __iter__(self):
76 return self 76 return self
77 77
@@ -267,7 +267,7 @@ def sigterm(signum, frame):
267 267
268def main(): 268def main():
269 signal.signal(signal.SIGTERM, sigterm) 269 signal.signal(signal.SIGTERM, sigterm)
270 270
271 if "::" in args.source: 271 if "::" in args.source:
272 (src_repo_path, _, src_archive) = args.source.partition("::") 272 (src_repo_path, _, src_archive) = args.source.partition("::")
273 entry = None 273 entry = None
diff --git a/hosts/vidhar/borg/default.nix b/hosts/vidhar/borg/default.nix
index 579630a9..650c91ee 100644
--- a/hosts/vidhar/borg/default.nix
+++ b/hosts/vidhar/borg/default.nix
@@ -1,23 +1,28 @@
1{ config, pkgs, lib, ... }: 1{ config, pkgs, lib, flakeInputs, ... }:
2 2
3with lib; 3with lib;
4 4
5let 5let
6 sshConfig = pkgs.writeText "config" ''
7 Include /etc/ssh/ssh_config
8
9 ControlMaster auto
10 ControlPath /var/lib/borg/.borgssh-master-%r@%n:%p
11 ControlPersist yes
12
13 Host yggdrasil.borgbase
14 HostName nx69hpl8.repo.borgbase.com
15 User nx69hpl8
16 IdentityFile ${config.sops.secrets."append.borgbase".path}
17 IdentitiesOnly yes
18
19 BatchMode yes
20 ServerAliveInterval 10
21 ServerAliveCountMax 30
22 '';
23
6 copyService = { repo, repoEscaped }: let 24 copyService = { repo, repoEscaped }: let
7 serviceName = "copy-borg@${repoEscaped}"; 25 serviceName = "copy-borg@${repoEscaped}";
8 sshConfig = pkgs.writeText "config" ''
9 Include /etc/ssh/ssh_config
10
11 Host yggdrasil.borgbase
12 HostName nx69hpl8.repo.borgbase.com
13 User nx69hpl8
14 IdentityFile ${config.sops.secrets."append.borgbase".path}
15 IdentitiesOnly yes
16
17 BatchMode yes
18 ServerAliveInterval 10
19 ServerAliveCountMax 30
20 '';
21 in nameValuePair serviceName { 26 in nameValuePair serviceName {
22 serviceConfig = { 27 serviceConfig = {
23 Type = "oneshot"; 28 Type = "oneshot";
@@ -72,8 +77,63 @@ let
72 --prefix PATH : ${makeBinPath (with pkgs; [utillinux borgbackup])}:${config.security.wrapperDir} 77 --prefix PATH : ${makeBinPath (with pkgs; [utillinux borgbackup])}:${config.security.wrapperDir}
73 ''; 78 '';
74 }); 79 });
80
81 borgsnap = flakeInputs.mach-nix.lib.${config.nixpkgs.system}.buildPythonPackage rec {
82 pname = "borgsnap";
83 src = ./borgsnap;
84 version = "0.0.0";
85 ignoreDataOutdated = true;
86
87 requirements = ''
88 atomicwrites
89 pyprctl
90 python-unshare
91 xdg
92 python-dateutil
93 '';
94 postInstall = ''
95 wrapProgram $out/bin/borgsnap \
96 --prefix PATH : ${makeBinPath (with pkgs; [utillinux borgbackup])}:${config.security.wrapperDir}
97 '';
98
99 providers.python-unshare = "nixpkgs";
100 overridesPre = [
101 (self: super: { python-unshare = super.python-unshare.overrideAttrs (oldAttrs: { name = "python-unshare-0.2.1"; version = "0.2.1"; }); })
102 ];
103
104 _.xdg.buildInputs.add = with pkgs."python3Packages"; [ poetry ];
105 _.tomli.buildInputs.add = with pkgs."python3Packages"; [ flit-core ];
106 };
75in { 107in {
76 config = { 108 config = {
109 services.zfssnap.config.exec = {
110 check = "${borgsnap}/bin/borgsnap -vvv --target yggdrasil.borgbase:repo --archive-prefix yggdrasil.vidhar. check --cache-file /run/zfssnap-prune/archives-cache.json";
111 cmd = "${borgsnap}/bin/borgsnap -vvv --target yggdrasil.borgbase:repo --archive-prefix yggdrasil.vidhar. create --dry-run";
112
113 halfweekly = "8";
114 monthly = "-1";
115 };
116
117 systemd.services = {
118 "zfssnap-prune" = {
119 serviceConfig = {
120 Environment = [
121 "BORG_RSH=\"${pkgs.openssh}/bin/ssh -F ${sshConfig}\""
122 "BORG_BASE_DIR=/var/lib/borg"
123 "BORG_CONFIG_DIR=/var/lib/borg/config"
124 "BORG_CACHE_DIR=/var/lib/borg/cache"
125 "BORG_SECURITY_DIR=/var/lib/borg/security"
126 "BORG_KEYS_DIR=/var/lib/borg/keys"
127 "BORG_KEY_FILE=${config.sops.secrets."yggdrasil.borgkey".path}"
128 "BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes"
129 "BORG_HOSTNAME_IS_UNIQUE=yes"
130 ];
131 RuntimeDirectory = "zfssnap-prune";
132 };
133 };
134 } // listToAttrs (map copyService [{ repo = "/srv/backup/borg/jotnar"; repoEscaped = "srv-backup-borg-jotnar"; }]);
135
136
77 services.borgbackup.repos.jotnar = { 137 services.borgbackup.repos.jotnar = {
78 path = "/srv/backup/borg/jotnar"; 138 path = "/srv/backup/borg/jotnar";
79 authorizedKeysAppendOnly = let 139 authorizedKeysAppendOnly = let
@@ -111,11 +171,9 @@ in {
111 mode = "0400"; 171 mode = "0400";
112 }; 172 };
113 173
114 systemd.services = listToAttrs (map copyService [{ repo = "/srv/backup/borg/jotnar"; repoEscaped = "srv-backup-borg-jotnar"; }]);
115
116 systemd.timers."copy-borg@srv-backup-borg-jotnar" = { 174 systemd.timers."copy-borg@srv-backup-borg-jotnar" = {
117 wantedBy = ["multi-user.target"]; 175 wantedBy = ["multi-user.target"];
118 176
119 timerConfig = { 177 timerConfig = {
120 OnCalendar = "*-*-* 00/4:00:00 Europe/Berlin"; 178 OnCalendar = "*-*-* 00/4:00:00 Europe/Berlin";
121 }; 179 };
diff --git a/hosts/vidhar/default.nix b/hosts/vidhar/default.nix
index 121cc9df..3f5d17d5 100644
--- a/hosts/vidhar/default.nix
+++ b/hosts/vidhar/default.nix
@@ -1,4 +1,7 @@
1{ hostName, flake, config, pkgs, lib, ... }: 1{ hostName, flake, config, pkgs, lib, ... }:
2
3with lib;
4
2{ 5{
3 imports = with flake.nixosModules.systemProfiles; [ 6 imports = with flake.nixosModules.systemProfiles; [
4 ./zfs.nix ./network ./samba.nix ./dns ./prometheus ./borg 7 ./zfs.nix ./network ./samba.nix ./dns ./prometheus ./borg
@@ -39,7 +42,7 @@
39 luks.devices = { 42 luks.devices = {
40 nvm0 = { device = "/dev/disk/by-label/${hostName}-nvm0"; bypassWorkqueues = true; }; 43 nvm0 = { device = "/dev/disk/by-label/${hostName}-nvm0"; bypassWorkqueues = true; };
41 nvm1 = { device = "/dev/disk/by-label/${hostName}-nvm1"; bypassWorkqueues = true; }; 44 nvm1 = { device = "/dev/disk/by-label/${hostName}-nvm1"; bypassWorkqueues = true; };
42 45
43 hdd0.device = "/dev/disk/by-label/${hostName}-hdd0"; 46 hdd0.device = "/dev/disk/by-label/${hostName}-hdd0";
44 hdd1.device = "/dev/disk/by-label/${hostName}-hdd1"; 47 hdd1.device = "/dev/disk/by-label/${hostName}-hdd1";
45 hdd2.device = "/dev/disk/by-label/${hostName}-hdd2"; 48 hdd2.device = "/dev/disk/by-label/${hostName}-hdd2";
@@ -58,7 +61,7 @@
58 options = [ "mode=0755" ]; 61 options = [ "mode=0755" ];
59 }; 62 };
60 }; 63 };
61 64
62 services.timesyncd.enable = false; 65 services.timesyncd.enable = false;
63 services.chrony = { 66 services.chrony = {
64 enable = true; 67 enable = true;
@@ -132,6 +135,7 @@
132 access_log syslog:server=unix:/dev/log main; 135 access_log syslog:server=unix:/dev/log main;
133 error_log syslog:server=unix:/dev/log info; 136 error_log syslog:server=unix:/dev/log info;
134 137
138 client_body_buffer_size 16m;
135 client_body_temp_path /run/nginx-client-bodies; 139 client_body_temp_path /run/nginx-client-bodies;
136 ''; 140 '';
137 upstreams.grafana = { 141 upstreams.grafana = {
@@ -173,12 +177,12 @@
173 sopsFile = ./selfsigned.key; 177 sopsFile = ./selfsigned.key;
174 }; 178 };
175 systemd.services.nginx = { 179 systemd.services.nginx = {
176 preStart = lib.mkForce config.services.nginx.preStart; 180 preStart = mkForce config.services.nginx.preStart;
177 serviceConfig = { 181 serviceConfig = {
178 ExecReload = lib.mkForce "${pkgs.coreutils}/bin/kill -HUP $MAINPID"; 182 ExecReload = mkForce "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
179 LoadCredential = [ "selfsigned.key:${config.sops.secrets."selfsigned.key".path}" ]; 183 LoadCredential = [ "selfsigned.key:${config.sops.secrets."selfsigned.key".path}" ];
180 184
181 RuntimeDirectory = lib.mkForce [ "nginx" "nginx-client-bodies" ]; 185 RuntimeDirectory = mkForce [ "nginx" "nginx-client-bodies" ];
182 RuntimeDirectoryMode = "0750"; 186 RuntimeDirectoryMode = "0750";
183 }; 187 };
184 }; 188 };
@@ -232,7 +236,7 @@
232 }; 236 };
233 }; 237 };
234 systemd.services.loki.preStart = let 238 systemd.services.loki.preStart = let
235 rulesYaml = lib.generators.toYAML {} { 239 rulesYaml = generators.toYAML {} {
236 groups = [ 240 groups = [
237 { name = "power-failures"; 241 { name = "power-failures";
238 rules = [ 242 rules = [
@@ -311,6 +315,29 @@
311 timers.wants = ["systemd-tmpfiles-clean.timer"]; 315 timers.wants = ["systemd-tmpfiles-clean.timer"];
312 }; 316 };
313 317
318 services.smartd = {
319 enable = true;
320 autodetect = false;
321 defaults.monitored = "-a -o on -s (S/../.././02|L/../../7/04)";
322 devices = map (dev: { device = "/dev/disk/by-path/${dev}"; }) [
323 "pci-0000:00:1f.2-ata-1"
324 "pci-0000:00:1f.2-ata-3"
325 "pci-0000:00:1f.2-ata-4"
326 "pci-0000:00:1f.2-ata-5"
327 "pci-0000:00:1f.2-ata-6"
328 "pci-0000:02:00.0-nvme-1"
329 "pci-0000:05:00.0-sas-phy0-lun-0"
330 "pci-0000:05:00.0-sas-phy1-lun-0"
331 "pci-0000:06:00.0-nvme-1"
332 ];
333 notifications = {
334 test = false;
335 mail.enable = false;
336 x11.enable = false;
337 wall.enable = false;
338 };
339 };
340
314 environment.systemPackages = with pkgs; [iotop vmtouch]; 341 environment.systemPackages = with pkgs; [iotop vmtouch];
315 342
316 system.stateVersion = "21.05"; 343 system.stateVersion = "21.05";
diff --git a/hosts/vidhar/prometheus/default.nix b/hosts/vidhar/prometheus/default.nix
index 4c23d8a9..7ac86c30 100644
--- a/hosts/vidhar/prometheus/default.nix
+++ b/hosts/vidhar/prometheus/default.nix
@@ -34,20 +34,6 @@ in {
34 enable = true; 34 enable = true;
35 enabledCollectors = []; 35 enabledCollectors = [];
36 }; 36 };
37 smartctl = {
38 enable = true;
39 devices = map (dev: "/dev/disk/by-path/${dev}") [
40 "pci-0000:00:1f.2-ata-1"
41 "pci-0000:00:1f.2-ata-3"
42 "pci-0000:00:1f.2-ata-4"
43 "pci-0000:00:1f.2-ata-5"
44 "pci-0000:00:1f.2-ata-6"
45 "pci-0000:02:00.0-nvme-1"
46 "pci-0000:05:00.0-sas-phy0-lun-0"
47 "pci-0000:05:00.0-sas-phy1-lun-0"
48 "pci-0000:06:00.0-nvme-1"
49 ];
50 };
51 snmp = { 37 snmp = {
52 enable = true; 38 enable = true;
53 configurationPath = ./snmp.yml; 39 configurationPath = ./snmp.yml;
@@ -124,10 +110,10 @@ in {
124 } 110 }
125 { job_name = "smartctl"; 111 { job_name = "smartctl";
126 static_configs = [ 112 static_configs = [
127 { targets = ["localhost:${toString config.services.prometheus.exporters.smartctl.port}"]; } 113 { targets = ["localhost:9633"]; }
128 ]; 114 ];
129 relabel_configs = relabelHosts; 115 relabel_configs = relabelHosts;
130 scrape_interval = "1s"; 116 scrape_interval = "60s";
131 } 117 }
132 { job_name = "snmp"; 118 { job_name = "snmp";
133 static_configs = [ 119 static_configs = [
@@ -376,6 +362,30 @@ in {
376 }; 362 };
377 }; 363 };
378 364
365 systemd.services."prometheus-smartctl-exporter" = {
366 wantedBy = [ "multi-user.target" ];
367 after = [ "network.target" ];
368 path = with pkgs; [ smartmontools ];
369 serviceConfig = {
370 Restart = "always";
371
372 CapabilityBoundingSet = ["CAP_DAC_OVERRIDE" "CAP_SYS_RAWIO" "CAP_SYS_ADMIN"];
373 AmbientCapabilities = ["CAP_DAC_OVERRIDE" "CAP_SYS_RAWIO" "CAP_SYS_ADMIN"];
374 ProtectSystem = "strict";
375 DynamicUser = true;
376 LockPersonality = true;
377 MemoryDenyWriteExecute = true;
378 NoNewPrivileges = true;
379 PrivateDevices = false;
380 PrivateTmp = true;
381 ProcSubset = "pid";
382
383 Type = "simple";
384 ExecStart = "${pkgs.smartprom}/bin/smartprom";
385 Environment = "SMARTCTL_EXPORTER_PORT=9633";
386 };
387 };
388
379 systemd.services."prometheus-systemd-exporter" = let 389 systemd.services."prometheus-systemd-exporter" = let
380 cfg = config.services.prometheus.exporters.systemd; 390 cfg = config.services.prometheus.exporters.systemd;
381 in { 391 in {
@@ -385,14 +395,6 @@ in {
385 ''; 395 '';
386 }; 396 };
387 397
388 systemd.services."prometheus-smartctl-exporter" = {
389 serviceConfig = {
390 DeviceAllow = lib.mkForce config.services.prometheus.exporters.smartctl.devices;
391 CapabilityBoundingSet = lib.mkForce ["CAP_SYS_ADMIN"];
392 AmbientCapabilities = lib.mkForce ["CAP_SYS_ADMIN"];
393 };
394 };
395
396 services.nginx = { 398 services.nginx = {
397 upstreams.prometheus = { 399 upstreams.prometheus = {
398 servers = { "localhost:${toString config.services.prometheus.port}" = {}; }; 400 servers = { "localhost:${toString config.services.prometheus.port}" = {}; };
diff --git a/hosts/vidhar/zfs.nix b/hosts/vidhar/zfs.nix
index ef285536..52b48aca 100644
--- a/hosts/vidhar/zfs.nix
+++ b/hosts/vidhar/zfs.nix
@@ -130,7 +130,21 @@
130 echo "=== ZPOOL IMPORT COMPLETE ===" 130 echo "=== ZPOOL IMPORT COMPLETE ==="
131 ''; 131 '';
132 132
133 services.zfssnap.enable = true; 133 services.zfssnap = {
134 enable = true;
135 config.keep = {
136 within = "15m";
137 "5m" = "48";
138 "15m" = "32";
139 hourly = "48";
140 "4h" = "24";
141 "12h" = "12";
142 daily = "62";
143 halfweekly = "32";
144 weekly = "24";
145 monthly = "-1";
146 };
147 };
134 services.zfs.trim.enable = false; 148 services.zfs.trim.enable = false;
135 services.zfs.autoScrub = { 149 services.zfs.autoScrub = {
136 enable = true; 150 enable = true;
diff --git a/modules/postfwd.nix b/modules/postfwd.nix
new file mode 100644
index 00000000..4afea0a1
--- /dev/null
+++ b/modules/postfwd.nix
@@ -0,0 +1,65 @@
1{ config, lib, pkgs, ... }:
2
3with lib;
4
5let
6 cfg = config.services.postfwd;
7in {
8 options = {
9 services.postfwd = with types; {
10 enable = mkEnableOption "postfwd3 - postfix firewall daemon";
11
12 rules = mkOption {
13 type = lines;
14 default = "";
15 };
16 };
17 };
18
19 config = mkIf cfg.enable {
20 systemd.services.postfwd = {
21 description = "postfwd3 - postfix firewall daemon";
22 wantedBy = ["multi-user.target"];
23 before = ["postfix.service"];
24
25 serviceConfig = {
26 Type = "forking";
27
28 ExecStart = "${pkgs.postfwd}/bin/postfwd3 ${escapeShellArgs [
29 "-vv"
30 "--daemon" "--user" "postfwd" "--group" "postfwd"
31 "--pidfile" "/run/postfwd3/postfwd3.pid"
32 "--proto" "unix"
33 "--port" "/run/postfwd3/postfwd3.sock"
34 "--save_rates" "/var/lib/postfwd/rates"
35 "--file" (pkgs.writeText "postfwd3-rules" cfg.rules)
36 ]}";
37 PIDFile = "/run/postfwd3/postfwd3.pid";
38
39 Restart = "always";
40 RestartSec = 5;
41 TimeoutSec = 10;
42
43 RuntimeDirectory = ["postfwd3"];
44 StateDirectory = ["postfwd"];
45
46 DynamicUser = true;
47 ProtectSystem = "strict";
48 SystemCallFilter = "@system-service";
49 NoNewPrivileges = true;
50 ProtectKernelTunables = true;
51 ProtectKernelModules = true;
52 ProtectKernelLogs = true;
53 ProtectControlGroups = true;
54 MemoryDenyWriteExecute = true;
55 RestrictSUIDSGID = true;
56 KeyringMode = "private";
57 ProtectClock = true;
58 RestrictRealtime = true;
59 PrivateDevices = true;
60 PrivateTmp = true;
61 ProtectHostname = true;
62 };
63 };
64 };
65}
diff --git a/modules/zfssnap/default.nix b/modules/zfssnap/default.nix
index d1080e8a..f3e2f9c2 100644
--- a/modules/zfssnap/default.nix
+++ b/modules/zfssnap/default.nix
@@ -1,7 +1,7 @@
1{ config, pkgs, lib, ... }: 1{ config, pkgs, lib, ... }:
2 2
3with lib; 3with lib;
4 4
5let 5let
6 zfssnap = pkgs.stdenv.mkDerivation rec { 6 zfssnap = pkgs.stdenv.mkDerivation rec {
7 name = "zfssnap"; 7 name = "zfssnap";
@@ -37,7 +37,7 @@ in {
37 options = { 37 options = {
38 services.zfssnap = { 38 services.zfssnap = {
39 enable = mkEnableOption "zfssnap service"; 39 enable = mkEnableOption "zfssnap service";
40 40
41 config = mkOption { 41 config = mkOption {
42 type = with types; attrsOf (attrsOf str); 42 type = with types; attrsOf (attrsOf str);
43 default = { 43 default = {
@@ -82,7 +82,7 @@ in {
82 ExecStart = let 82 ExecStart = let
83 mkSectionName = name: strings.escape [ "[" "]" ] (strings.toUpper name); 83 mkSectionName = name: strings.escape [ "[" "]" ] (strings.toUpper name);
84 zfssnapConfig = generators.toINI { inherit mkSectionName; } cfg.config; 84 zfssnapConfig = generators.toINI { inherit mkSectionName; } cfg.config;
85 in "${zfssnap}/bin/zfssnap -v prune --config=${pkgs.writeText "zfssnap.ini" zfssnapConfig}"; 85 in "${zfssnap}/bin/zfssnap -vv prune --config=${pkgs.writeText "zfssnap.ini" zfssnapConfig}";
86 }; 86 };
87 }; 87 };
88 88
diff --git a/modules/zfssnap/zfssnap.py b/modules/zfssnap/zfssnap.py
index 21ed1d5b..a8dae75f 100644
--- a/modules/zfssnap/zfssnap.py
+++ b/modules/zfssnap/zfssnap.py
@@ -3,9 +3,9 @@
3import csv 3import csv
4import subprocess 4import subprocess
5import io 5import io
6from distutils.util import strtobool 6from distutils.util import strtobool
7from datetime import datetime, timezone, timedelta 7from datetime import datetime, timezone, timedelta
8from dateutil.tz import gettz, tzlocal 8from dateutil.tz import gettz, tzutc
9import pytimeparse 9import pytimeparse
10import argparse 10import argparse
11import re 11import re
@@ -27,6 +27,36 @@ from math import floor
27 27
28import asyncio 28import asyncio
29 29
30from dataclasses import dataclass
31
32
33TIME_PATTERNS = OrderedDict([
34 ("secondly", lambda t: t.strftime('%Y-%m-%d %H:%M:%S')),
35 ("minutely", lambda t: t.strftime('%Y-%m-%d %H:%M')),
36 ("5m", lambda t: (t.strftime('%Y-%m-%d %H'), floor(t.minute / 5) * 5)),
37 ("15m", lambda t: (t.strftime('%Y-%m-%d %H'), floor(t.minute / 15) * 15)),
38 ("hourly", lambda t: t.strftime('%Y-%m-%d %H')),
39 ("4h", lambda t: (t.strftime('%Y-%m-%d'), floor(t.hour / 4) * 4)),
40 ("12h", lambda t: (t.strftime('%Y-%m-%d'), floor(t.hour / 12) * 12)),
41 ("daily", lambda t: t.strftime('%Y-%m-%d')),
42 ("halfweekly", lambda t: (t.strftime('%G-%V'), floor(int(t.strftime('%u')) / 4) * 4)),
43 ("weekly", lambda t: t.strftime('%G-%V')),
44 ("monthly", lambda t: t.strftime('%Y-%m')),
45 ("yearly", lambda t: t.strftime('%Y')),
46])
47
48@dataclass(eq=True, order=True, frozen=True)
49class Snap:
50 name: str
51 creation: datetime
52
53@dataclass(eq=True, order=True, frozen=True)
54class KeptBecause:
55 rule: str
56 ix: int
57 base: str
58 period: str
59
30 60
31@cache 61@cache
32def _now(): 62def _now():
@@ -42,56 +72,120 @@ def _log_cmd(*args):
42 72
43def _get_items(): 73def _get_items():
44 items = {} 74 items = {}
45 75
46 args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', '-t', 'filesystem,volume', '-s', 'local,default,inherited,temporary,received', 'li.yggdrasil:auto-snapshot'] 76 args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', '-t', 'filesystem,volume', '-s', 'local,default,inherited,temporary,received', 'li.yggdrasil:auto-snapshot']
47 _log_cmd(*args) 77 _log_cmd(*args)
48 with subprocess.Popen(args, stdout=subprocess.PIPE) as proc: 78 with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
49 text_stdout = io.TextIOWrapper(proc.stdout) 79 text_stdout = io.TextIOWrapper(proc.stdout)
50 reader = csv.reader(text_stdout, delimiter='\t', quoting=csv.QUOTE_NONE) 80 reader = csv.DictReader(text_stdout, fieldnames=['name', 'setting'], delimiter='\t', quoting=csv.QUOTE_NONE)
51 Row = namedtuple('Row', ['name', 'setting']) 81 Row = namedtuple('Row', reader.fieldnames)
52 for row in map(Row._make, reader): 82 for row in [Row(**data) for data in reader]:
53 items[row.name] = bool(strtobool(row.setting)) 83 items[row.name] = bool(strtobool(row.setting))
54 84
55 return items 85 return items
56 86
57def prune(config, dry_run, keep_newest): 87def _get_snaps(only_auto=True):
58 prunable_snapshots = set() 88 snapshots = defaultdict(list)
59 args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', '-t', 'snapshot', '-s', 'local', 'li.yggdrasil:is-auto-snapshot'] 89 args = ['zfs', 'list', '-H', '-p', '-t', 'snapshot', '-o', 'name,li.yggdrasil:is-auto-snapshot,creation']
60 _log_cmd(*args)
61 with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
62 text_stdout = io.TextIOWrapper(proc.stdout)
63 reader = csv.reader(text_stdout, delimiter='\t', quoting=csv.QUOTE_NONE)
64 Row = namedtuple('Row', ['name', 'is_auto_snapshot'])
65 for row in map(Row._make, reader):
66 if bool(strtobool(row.is_auto_snapshot)):
67 prunable_snapshots.add(row.name)
68
69 items = defaultdict(list)
70 Snap = namedtuple('Snap', ['name', 'creation'])
71 args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', '-t', 'snapshot', 'creation']
72 _log_cmd(*args) 90 _log_cmd(*args)
73 with subprocess.Popen(args, stdout=subprocess.PIPE) as proc: 91 with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
74 text_stdout = io.TextIOWrapper(proc.stdout) 92 text_stdout = io.TextIOWrapper(proc.stdout)
75 reader = csv.reader(text_stdout, delimiter='\t', quoting=csv.QUOTE_NONE) 93 reader = csv.DictReader(text_stdout, fieldnames=['name', 'is_auto_snapshot', 'timestamp'], delimiter='\t', quoting=csv.QUOTE_NONE)
76 Row = namedtuple('Row', ['name', 'timestamp']) 94 Row = namedtuple('Row', reader.fieldnames)
77 for row in map(Row._make, reader): 95 for row in [Row(**data) for data in reader]:
78 if row.name not in prunable_snapshots: 96 if only_auto and not bool(strtobool(row.is_auto_snapshot)):
79 continue 97 continue
80 98
81 base_name, _, _ = row.name.rpartition('@') 99 base_name, _, _ = row.name.rpartition('@')
82 creation = datetime.fromtimestamp(int(row.timestamp), timezone.utc) 100 creation = datetime.fromtimestamp(int(row.timestamp), timezone.utc)
83 items[base_name].append(Snap(name=row.name, creation=creation)) 101 snapshots[base_name].append(Snap(name=row.name, creation=creation))
102
103 return snapshots
104
105def prune(config, dry_run, keep_newest, do_exec):
106 do_exec = do_exec and 'EXEC' in config
107 prune_timezone = config.gettimezone('KEEP', 'timezone', fallback=tzutc())
108 logger.debug(f'prune timezone: {prune_timezone}')
109
110 items = _get_snaps()
111
112 exec_candidates = set()
113 if do_exec:
114 exec_timezone = config.gettimezone('EXEC', 'timezone', fallback=prune_timezone)
115 logger.debug(f'exec timezone: {exec_timezone}')
116
117 for rule, pattern in TIME_PATTERNS.items():
118 desired_count = config.getint('EXEC', rule, fallback=0)
119
120 for base, snaps in items.items():
121 periods = OrderedDict()
122
123 for snap in sorted(snaps, key=lambda snap: snap.creation):
124 period = pattern(snap.creation.astimezone(exec_timezone))
125 if period not in periods:
126 periods[period] = deque()
127 periods[period].append(snap)
128
129 to_exec = desired_count
130 ordered_periods = periods.items()
131 for period, period_snaps in ordered_periods:
132 if to_exec == 0:
133 break
134
135 for snap in period_snaps:
136 exec_candidates.add(snap)
137 logger.debug(f'{snap.name} is exec candidate')
138 to_exec -= 1
139 break
140
141 if to_exec > 0:
142 logger.debug(f'Missing {to_exec} to fulfill exec {rule}={desired_count} for ‘{base}’')
143
144 check_cmd = config.get('EXEC', 'check', fallback=None)
145 if check_cmd:
146 already_execed = set()
147 for snap in exec_candidates:
148 args = []
149 args += shlex.split(check_cmd)
150 args += [snap.name]
151 _log_cmd(*args)
152 check_res = subprocess.run(args)
153 if check_res.returncode == 0:
154 already_execed.add(snap)
155 logger.debug(f'{snap.name} already execed')
156 exec_candidates -= already_execed
157
158 exec_cmd = config.get('EXEC', 'cmd', fallback=None)
159 exec_count = config.getint('EXEC', 'count', fallback=1)
160 if exec_cmd:
161 execed = set()
162 for snap in sorted(exec_candidates, key=lambda snap: snap.creation):
163 if len(execed) >= exec_count:
164 logger.debug(f'exc_count of {exec_count} reached')
165 break
166
167 args = []
168 args += shlex.split(exec_cmd)
169 args += [snap.name]
170 _log_cmd(*args)
171 subprocess.run(args).check_returncode()
172 execed.add(snap)
173
174 exec_candidates -= execed
84 175
85 kept_count = defaultdict(lambda: defaultdict(lambda: 0)) 176 kept_count = defaultdict(lambda: defaultdict(lambda: 0))
86 KeptBecause = namedtuple('KeptBecause', ['rule', 'ix', 'base', 'period'])
87 kept_because = OrderedDict() 177 kept_because = OrderedDict()
88 def keep_because(base, snap, rule, period=None): 178 def keep_because(base, snap, rule, period=None):
89 nonlocal KeptBecause, kept_count, kept_because 179 nonlocal kept_count, kept_because
90 kept_count[rule][base] += 1 180 kept_count[rule][base] += 1
91 if snap not in kept_because: 181 if snap not in kept_because:
92 kept_because[snap] = deque() 182 kept_because[snap] = deque()
93 kept_because[snap].append(KeptBecause(rule=rule, ix=kept_count[rule][base], base=base, period=period)) 183 kept_because[snap].append(KeptBecause(rule=rule, ix=kept_count[rule][base], base=base, period=period))
94 184
185 for candidate in exec_candidates:
186 base_name, _, _ = candidate.name.rpartition('@')
187 keep_because(base_name, candidate.name, 'exec-candidate')
188
95 within = config.gettimedelta('KEEP', 'within') 189 within = config.gettimedelta('KEEP', 'within')
96 if within > timedelta(seconds=0): 190 if within > timedelta(seconds=0):
97 for base, snaps in items.items(): 191 for base, snaps in items.items():
@@ -109,31 +203,14 @@ def prune(config, dry_run, keep_newest):
109 else: 203 else:
110 logger.warn('Skipping rule ‘within’ since retention period is zero') 204 logger.warn('Skipping rule ‘within’ since retention period is zero')
111 205
112 prune_timezone = config.gettimezone('KEEP', 'timezone', fallback=tzlocal) 206 for rule, pattern in TIME_PATTERNS.items():
113
114 PRUNING_PATTERNS = OrderedDict([
115 ("secondly", lambda t: t.strftime('%Y-%m-%d %H:%M:%S')),
116 ("minutely", lambda t: t.strftime('%Y-%m-%d %H:%M')),
117 ("5m", lambda t: (t.strftime('%Y-%m-%d %H'), floor(t.minute / 5) * 5)),
118 ("15m", lambda t: (t.strftime('%Y-%m-%d %H'), floor(t.minute / 15) * 15)),
119 ("hourly", lambda t: t.strftime('%Y-%m-%d %H')),
120 ("4h", lambda t: (t.strftime('%Y-%m-%d'), floor(t.hour / 4) * 4)),
121 ("12h", lambda t: (t.strftime('%Y-%m-%d'), floor(t.hour / 12) * 12)),
122 ("daily", lambda t: t.strftime('%Y-%m-%d')),
123 ("halfweekly", lambda t: (t.strftime('%G-%V'), floor(int(t.strftime('%u')) / 4) * 4)),
124 ("weekly", lambda t: t.strftime('%G-%V')),
125 ("monthly", lambda t: t.strftime('%Y-%m')),
126 ("yearly", lambda t: t.strftime('%Y')),
127 ])
128
129 for rule, pattern in PRUNING_PATTERNS.items():
130 desired_count = config.getint('KEEP', rule, fallback=0) 207 desired_count = config.getint('KEEP', rule, fallback=0)
131 208
132 for base, snaps in items.items(): 209 for base, snaps in items.items():
133 periods = OrderedDict() 210 periods = OrderedDict()
134 211
135 for snap in sorted(snaps, key=lambda snap: snap.creation, reverse=keep_newest): 212 for snap in sorted(snaps, key=lambda snap: snap.creation, reverse=keep_newest):
136 period = pattern(snap.creation) 213 period = pattern(snap.creation.astimezone(prune_timezone))
137 if period not in periods: 214 if period not in periods:
138 periods[period] = deque() 215 periods[period] = deque()
139 periods[period].append(snap) 216 periods[period].append(snap)
@@ -150,7 +227,7 @@ def prune(config, dry_run, keep_newest):
150 break 227 break
151 228
152 if to_keep > 0: 229 if to_keep > 0:
153 logger.debug(f'Missing {to_keep} to fulfill {rule}={desired_count} for ‘{base}’') 230 logger.debug(f'Missing {to_keep} to fulfill prune {rule}={desired_count} for ‘{base}’')
154 231
155 for snap, reasons in kept_because.items(): 232 for snap, reasons in kept_because.items():
156 reasons_str = ', '.join(map(str, reasons)) 233 reasons_str = ', '.join(map(str, reasons))
@@ -171,16 +248,16 @@ def prune(config, dry_run, keep_newest):
171 logger.info(f'Would have pruned ‘{snap}’') 248 logger.info(f'Would have pruned ‘{snap}’')
172 else: 249 else:
173 logger.info(f'Pruned ‘{snap}’') 250 logger.info(f'Pruned ‘{snap}’')
174 251
175def rename(snapshots, destroy=False, set_is_auto=False): 252def rename(snapshots, destroy=False, set_is_auto=False):
176 args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', 'creation', *snapshots] 253 args = ['zfs', 'get', '-H', '-p', '-o', 'name,value', 'creation', *snapshots]
177 _log_cmd(*args) 254 _log_cmd(*args)
178 renamed_to = set() 255 renamed_to = set()
179 with subprocess.Popen(args, stdout=subprocess.PIPE) as proc: 256 with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
180 text_stdout = io.TextIOWrapper(proc.stdout) 257 text_stdout = io.TextIOWrapper(proc.stdout)
181 reader = csv.reader(text_stdout, delimiter='\t', quoting=csv.QUOTE_NONE) 258 reader = csv.DictReader(text_stdout, fieldnames=['name', 'timestamp'], delimiter='\t', quoting=csv.QUOTE_NONE)
182 Row = namedtuple('Row', ['name', 'timestamp']) 259 Row = namedtuple('Row', reader.fieldnames)
183 for row in map(Row._make, reader): 260 for row in [Row(**data) for data in reader]:
184 creation = datetime.fromtimestamp(int(row.timestamp), timezone.utc) 261 creation = datetime.fromtimestamp(int(row.timestamp), timezone.utc)
185 base_name, _, _ = row.name.rpartition('@') 262 base_name, _, _ = row.name.rpartition('@')
186 new_name = _snap_name(base_name, time=creation) 263 new_name = _snap_name(base_name, time=creation)
@@ -217,7 +294,7 @@ def autosnap():
217 all_snap_names = set() 294 all_snap_names = set()
218 async def do_snapshot(*snap_items, recursive=False): 295 async def do_snapshot(*snap_items, recursive=False):
219 nonlocal items, all_snap_names 296 nonlocal items, all_snap_names
220 snap_names = {_snap_name(item) for item in snap_items} 297 snap_names = {_snap_name(item) for item in snap_items if items[item]}
221 if recursive: 298 if recursive:
222 for snap_item in snap_items: 299 for snap_item in snap_items:
223 all_snap_names |= {_snap_name(item) for item in items if item.startswith(snap_item)} 300 all_snap_names |= {_snap_name(item) for item in items if item.startswith(snap_item)}
@@ -268,7 +345,7 @@ def main():
268 sys.__excepthook__(type, value, tb) # calls default excepthook 345 sys.__excepthook__(type, value, tb) # calls default excepthook
269 346
270 sys.excepthook = log_exceptions 347 sys.excepthook = log_exceptions
271 348
272 parser = argparse.ArgumentParser(prog='zfssnap') 349 parser = argparse.ArgumentParser(prog='zfssnap')
273 parser.add_argument('--verbose', '-v', action='count', default=0) 350 parser.add_argument('--verbose', '-v', action='count', default=0)
274 subparsers = parser.add_subparsers() 351 subparsers = parser.add_subparsers()
@@ -282,6 +359,7 @@ def main():
282 prune_parser.add_argument('--config', '-c', dest='config_files', nargs='*', default=list()) 359 prune_parser.add_argument('--config', '-c', dest='config_files', nargs='*', default=list())
283 prune_parser.add_argument('--dry-run', '-n', action='store_true', default=False) 360 prune_parser.add_argument('--dry-run', '-n', action='store_true', default=False)
284 prune_parser.add_argument('--keep-newest', action='store_true', default=False) 361 prune_parser.add_argument('--keep-newest', action='store_true', default=False)
362 prune_parser.add_argument('--no-exec', dest='do_exec', action='store_false', default=True)
285 prune_parser.set_defaults(cmd=prune) 363 prune_parser.set_defaults(cmd=prune)
286 args = parser.parse_args() 364 args = parser.parse_args()
287 365
@@ -293,7 +371,7 @@ def main():
293 logger.setLevel(logging.DEBUG) 371 logger.setLevel(logging.DEBUG)
294 372
295 cmdArgs = {} 373 cmdArgs = {}
296 for copy in {'snapshots', 'dry_run', 'destroy', 'keep_newest', 'set_is_auto'}: 374 for copy in {'snapshots', 'dry_run', 'destroy', 'keep_newest', 'set_is_auto', 'do_exec'}:
297 if copy in vars(args): 375 if copy in vars(args):
298 cmdArgs[copy] = vars(args)[copy] 376 cmdArgs[copy] = vars(args)[copy]
299 if 'config_files' in vars(args): 377 if 'config_files' in vars(args):
@@ -308,7 +386,7 @@ def main():
308 }) 386 })
309 search_files = args.config_files if args.config_files else [*BaseDirectory.load_config_paths('zfssnap.ini')] 387 search_files = args.config_files if args.config_files else [*BaseDirectory.load_config_paths('zfssnap.ini')]
310 read_files = config.read(search_files) 388 read_files = config.read(search_files)
311 389
312 def format_config_files(files): 390 def format_config_files(files):
313 if not files: 391 if not files:
314 return 'no files' 392 return 'no files'
@@ -323,4 +401,5 @@ def main():
323 401
324 args.cmd(**cmdArgs) 402 args.cmd(**cmdArgs)
325 403
326sys.exit(main()) 404if __name__ == '__main__':
405 sys.exit(main())
diff --git a/nvfetcher.toml b/nvfetcher.toml
index b05862a7..cb460076 100644
--- a/nvfetcher.toml
+++ b/nvfetcher.toml
@@ -58,4 +58,14 @@ fetch.url = "https://github.com/wofr06/lesspipe/archive/refs/tags/v$ver.tar.gz"
58[postfix-mta-sts-resolver] 58[postfix-mta-sts-resolver]
59src.github = "Snawoot/postfix-mta-sts-resolver" 59src.github = "Snawoot/postfix-mta-sts-resolver"
60src.prefix = "v" 60src.prefix = "v"
61fetch.url = "https://github.com/Snawoot/postfix-mta-sts-resolver/archive/refs/tags/v$ver.tar.gz" \ No newline at end of file 61fetch.url = "https://github.com/Snawoot/postfix-mta-sts-resolver/archive/refs/tags/v$ver.tar.gz"
62
63[smartprom]
64src.github = "matusnovak/prometheus-smartctl"
65src.prefix = "v"
66fetch.url = "https://github.com/matusnovak/prometheus-smartctl/archive/refs/tags/v$ver.tar.gz"
67
68[postfwd]
69src.github_tag = "postfwd/postfwd"
70src.prefix = "v"
71fetch.url = "https://github.com/postfwd/postfwd/archive/refs/tags/v$ver.tar.gz" \ No newline at end of file
diff --git a/overlays/postfwd.nix b/overlays/postfwd.nix
new file mode 100644
index 00000000..8a4f4bd8
--- /dev/null
+++ b/overlays/postfwd.nix
@@ -0,0 +1,32 @@
1{ final, prev, sources, ... }:
2let
3 deps = with final.perlPackages; [NetDNS NetServer IOMultiplex NetAddrIP NetCIDRLite DigestMD5 TimeHiRes Storable];
4in {
5 postfwd = prev.stdenv.mkDerivation rec {
6 inherit (sources.postfwd) pname version src;
7
8 nativeBuildInputs = with prev; [ makeWrapper ];
9 propagatedBuildInputs = [final.perlPackages.perl] ++ deps;
10
11 buildPhase = ''
12 runHook preBuild
13
14 substituteInPlace sbin/postfwd3 \
15 --replace "/usr/bin/perl -T" "/usr/bin/perl"
16
17 runHook postBuild
18 '';
19
20 installPhase = ''
21 runHook preInstall
22
23 mkdir -p $out/bin
24 cp -t $out/bin sbin/postfwd3
25
26 wrapProgram $out/bin/postfwd3 \
27 --prefix PERL5LIB : ${final.perlPackages.makePerlPath deps}
28
29 runHook postInstall
30 '';
31 };
32}
diff --git a/overlays/smartprom/default.nix b/overlays/smartprom/default.nix
new file mode 100644
index 00000000..0dd0771b
--- /dev/null
+++ b/overlays/smartprom/default.nix
@@ -0,0 +1,19 @@
1{ final, prev, flakeInputs, sources, ... }:
2{
3 smartprom = flakeInputs.mach-nix.lib.${final.system}.buildPythonPackage rec {
4 inherit (sources.smartprom) src pname version;
5 ignoreDataOutdated = true;
6
7 prePatch = ''
8 mkdir smartprom
9 mv smartprom.py smartprom/__main__.py
10 echo >> smartprom/__init__.py
11
12 substituteAll ${./setup.py} ./setup.py
13 '';
14
15 requirements = ''
16 prometheus_client
17 '';
18 };
19}
diff --git a/overlays/smartprom/setup.py b/overlays/smartprom/setup.py
new file mode 100644
index 00000000..c30fc557
--- /dev/null
+++ b/overlays/smartprom/setup.py
@@ -0,0 +1,11 @@
1from setuptools import setup
2
3setup(name='@pname@',
4 version='@version@',
5 packages=['@pname@'],
6 entry_points={
7 'console_scripts': [
8 '@pname@=@pname@.__main__:main',
9 ],
10 }
11)
diff --git a/overlays/worktime/worktime.py b/overlays/worktime/worktime.py
index 9cfc6cd4..1fc00061 100755
--- a/overlays/worktime/worktime.py
+++ b/overlays/worktime/worktime.py
@@ -117,6 +117,7 @@ class Worktime(object):
117 force_day_to_work = True 117 force_day_to_work = True
118 leave_days = set() 118 leave_days = set()
119 leave_budget = dict() 119 leave_budget = dict()
120 time_per_day = None
120 121
121 @staticmethod 122 @staticmethod
122 def holidays(year): 123 def holidays(year):
@@ -151,10 +152,10 @@ class Worktime(object):
151 def __init__(self, start_datetime=None, end_datetime=None, now=None, include_running=True, force_day_to_work=True, **kwargs): 152 def __init__(self, start_datetime=None, end_datetime=None, now=None, include_running=True, force_day_to_work=True, **kwargs):
152 self.include_running = include_running 153 self.include_running = include_running
153 self.force_day_to_work = force_day_to_work 154 self.force_day_to_work = force_day_to_work
154 155
155 if now: 156 if now:
156 self.now = now 157 self.now = now
157 158
158 config = Worktime.config() 159 config = Worktime.config()
159 config_dir = BaseDirectory.load_first_config('worktime') 160 config_dir = BaseDirectory.load_first_config('worktime')
160 api = TogglAPI(api_token=config['TOGGL']['ApiToken'], workspace_id=config['TOGGL']['Workspace']) 161 api = TogglAPI(api_token=config['TOGGL']['ApiToken'], workspace_id=config['TOGGL']['Workspace'])
@@ -174,17 +175,17 @@ class Worktime(object):
174 except IOError as e: 175 except IOError as e:
175 if e.errno != 2: 176 if e.errno != 2:
176 raise e 177 raise e
177 178
178 179
179 hours_per_week = float(config.get('WORKTIME', 'HoursPerWeek', fallback=40)) 180 hours_per_week = float(config.get('WORKTIME', 'HoursPerWeek', fallback=40))
180 workdays = set([int(d.strip()) for d in config.get('WORKTIME', 'Workdays', fallback='1,2,3,4,5').split(',')]) 181 workdays = set([int(d.strip()) for d in config.get('WORKTIME', 'Workdays', fallback='1,2,3,4,5').split(',')])
181 time_per_day = timedelta(hours = hours_per_week) / len(workdays) 182 self.time_per_day = timedelta(hours = hours_per_week) / len(workdays)
182 183
183 holidays = dict() 184 holidays = dict()
184 185
185 leave_per_year = int(config.get('WORKTIME', 'LeavePerYear', fallback=30)) 186 leave_per_year = int(config.get('WORKTIME', 'LeavePerYear', fallback=30))
186 for year in range(start_date.year, end_date.year + 1): 187 for year in range(start_date.year, end_date.year + 1):
187 holidays |= {k: v * time_per_day for k, v in Worktime.holidays(year).items()} 188 holidays |= {k: v * self.time_per_day for k, v in Worktime.holidays(year).items()}
188 leave_frac = 1 189 leave_frac = 1
189 if date(year, 1, 1) < start_date.date(): 190 if date(year, 1, 1) < start_date.date():
190 leave_frac = (date(year + 1, 1, 1) - start_date.date()) / (date(year + 1, 1, 1) - date(year, 1, 1)) 191 leave_frac = (date(year + 1, 1, 1) - start_date.date()) / (date(year + 1, 1, 1) - date(year, 1, 1))
@@ -199,7 +200,7 @@ class Worktime(object):
199 day = datetime.strptime(datestr, date_format).replace(tzinfo=tzlocal()).date() 200 day = datetime.strptime(datestr, date_format).replace(tzinfo=tzlocal()).date()
200 if day != start_date.date(): 201 if day != start_date.date():
201 continue 202 continue
202 203
203 self.leave_budget[day.year] = (self.leave_budget[day.year] if day.year in self.leave_budget else 0) + int(count) 204 self.leave_budget[day.year] = (self.leave_budget[day.year] if day.year in self.leave_budget else 0) + int(count)
204 except IOError as e: 205 except IOError as e:
205 if e.errno != 2: 206 if e.errno != 2:
@@ -224,7 +225,7 @@ class Worktime(object):
224 toDay = parse_single(toDay) 225 toDay = parse_single(toDay)
225 else: 226 else:
226 fromDay = toDay = parse_single(datestr) 227 fromDay = toDay = parse_single(datestr)
227 time = time_per_day 228 time = self.time_per_day
228 if len(splitLine) == 2: 229 if len(splitLine) == 2:
229 [hours, datestr] = splitLine 230 [hours, datestr] = splitLine
230 time = timedelta(hours = float(hours)) 231 time = timedelta(hours = float(hours))
@@ -236,7 +237,7 @@ class Worktime(object):
236 if end_date.date() < day or day < start_date.date(): 237 if end_date.date() < day or day < start_date.date():
237 continue 238 continue
238 239
239 if excused_kind == 'leave' and not (day in holidays and holidays[day] >= time_per_day) and day.isoweekday() in workdays: 240 if excused_kind == 'leave' and not (day in holidays and holidays[day] >= self.time_per_day) and day.isoweekday() in workdays:
240 self.leave_days.add(day) 241 self.leave_days.add(day)
241 holidays[day] = time 242 holidays[day] = time
242 except IOError as e: 243 except IOError as e:
@@ -244,7 +245,7 @@ class Worktime(object):
244 raise e 245 raise e
245 246
246 pull_forward = dict() 247 pull_forward = dict()
247 248
248 start_day = start_date.date() 249 start_day = start_date.date()
249 end_day = end_date.date() 250 end_day = end_date.date()
250 251
@@ -271,7 +272,7 @@ class Worktime(object):
271 if not d == datetime.strptime(c, date_format).replace(tzinfo=tzlocal()).date(): break 272 if not d == datetime.strptime(c, date_format).replace(tzinfo=tzlocal()).date(): break
272 else: 273 else:
273 if d >= end_date.date(): 274 if d >= end_date.date():
274 pull_forward[d] = min(timedelta(hours = float(hours)), time_per_day - (holidays[d] if d in holidays else timedelta())) 275 pull_forward[d] = min(timedelta(hours = float(hours)), self.time_per_day - (holidays[d] if d in holidays else timedelta()))
275 except IOError as e: 276 except IOError as e:
276 if e.errno != 2: 277 if e.errno != 2:
277 raise e 278 raise e
@@ -280,10 +281,10 @@ class Worktime(object):
280 281
281 if pull_forward: 282 if pull_forward:
282 end_day = max(end_day, max(list(pull_forward))) 283 end_day = max(end_day, max(list(pull_forward)))
283 284
284 for day in [start_day + timedelta(days = x) for x in range(0, (end_day - start_day).days + 1)]: 285 for day in [start_day + timedelta(days = x) for x in range(0, (end_day - start_day).days + 1)]:
285 if day.isoweekday() in workdays: 286 if day.isoweekday() in workdays:
286 time_to_work = time_per_day 287 time_to_work = self.time_per_day
287 if day in holidays.keys(): 288 if day in holidays.keys():
288 time_to_work -= holidays[day] 289 time_to_work -= holidays[day]
289 if time_to_work > timedelta(): 290 if time_to_work > timedelta():
@@ -302,7 +303,7 @@ class Worktime(object):
302 day = datetime.strptime(datestr, date_format).replace(tzinfo=tzlocal()).date() 303 day = datetime.strptime(datestr, date_format).replace(tzinfo=tzlocal()).date()
303 extra_days_to_work[day] = timedelta(hours = float(hours)) 304 extra_days_to_work[day] = timedelta(hours = float(hours))
304 else: 305 else:
305 extra_days_to_work[datetime.strptime(stripped_line, date_format).replace(tzinfo=tzlocal()).date()] = time_per_day 306 extra_days_to_work[datetime.strptime(stripped_line, date_format).replace(tzinfo=tzlocal()).date()] = self.time_per_day
306 except IOError as e: 307 except IOError as e:
307 if e.errno != 2: 308 if e.errno != 2:
308 raise e 309 raise e
@@ -329,15 +330,15 @@ class Worktime(object):
329 330
330 extra_day_time_left = timedelta() 331 extra_day_time_left = timedelta()
331 for extra_day in extra_days_forward: 332 for extra_day in extra_days_forward:
332 day_time = max(timedelta(), time_per_day - extra_days_to_work[extra_day]) 333 day_time = max(timedelta(), self.time_per_day - extra_days_to_work[extra_day])
333 extra_day_time_left += day_time 334 extra_day_time_left += day_time
334 extra_day_time = min(extra_day_time_left, pull_forward[day]) 335 extra_day_time = min(extra_day_time_left, pull_forward[day])
335 time_forward = pull_forward[day] - extra_day_time 336 time_forward = pull_forward[day] - extra_day_time
336 if extra_day_time_left > timedelta(): 337 if extra_day_time_left > timedelta():
337 for extra_day in extra_days_forward: 338 for extra_day in extra_days_forward:
338 day_time = max(timedelta(), time_per_day - extra_days_to_work[extra_day]) 339 day_time = max(timedelta(), self.time_per_day - extra_days_to_work[extra_day])
339 extra_days_to_work[extra_day] += extra_day_time * (day_time / extra_day_time_left) 340 extra_days_to_work[extra_day] += extra_day_time * (day_time / extra_day_time_left)
340 341
341 hours_per_day_forward = time_forward / len(days_forward) if len(days_forward) > 0 else timedelta() 342 hours_per_day_forward = time_forward / len(days_forward) if len(days_forward) > 0 else timedelta()
342 days_forward.discard(end_date.date()) 343 days_forward.discard(end_date.date())
343 344
@@ -345,7 +346,7 @@ class Worktime(object):
345 346
346 if end_date.date() in extra_days_to_work: 347 if end_date.date() in extra_days_to_work:
347 self.time_pulled_forward += extra_days_to_work[end_date.date()] 348 self.time_pulled_forward += extra_days_to_work[end_date.date()]
348 349
349 self.time_to_work += self.time_pulled_forward 350 self.time_to_work += self.time_pulled_forward
350 351
351 self.time_worked += api.get_billable_hours(start_date, self.now, rounding = config.getboolean('WORKTIME', 'rounding', fallback=True)) 352 self.time_worked += api.get_billable_hours(start_date, self.now, rounding = config.getboolean('WORKTIME', 'rounding', fallback=True))
@@ -377,10 +378,10 @@ def worktime(**args):
377 378
378 if total_minutes_difference >= 0: 379 if total_minutes_difference >= 0:
379 difference_string = difference_string(total_minutes_difference * timedelta(minutes = 1)) 380 difference_string = difference_string(total_minutes_difference * timedelta(minutes = 1))
380 return "{difference_string}/{clockout_time}".format(difference_string = difference_string, clockout_time = clockout_time.strftime("%H:%M")) 381 return f"{difference_string}/{clockout_time:%H:%M}"
381 else: 382 else:
382 difference_string = difference_string(abs(total_minutes_difference) * timedelta(minutes = 1)) 383 difference_string = difference_string(abs(total_minutes_difference) * timedelta(minutes = 1))
383 return "{clockout_time}/{difference_string}".format(difference_string = difference_string, clockout_time = clockout_time.strftime("%H:%M")) 384 return f"{clockout_time:%H:%M}/{difference_string}"
384 else: 385 else:
385 if worktime.running_entry: 386 if worktime.running_entry:
386 difference_string = difference_string(abs(total_minutes_difference) * timedelta(minutes = 1)) 387 difference_string = difference_string(abs(total_minutes_difference) * timedelta(minutes = 1))
@@ -427,7 +428,20 @@ def time_worked(now, **args):
427 if hours_difference == 0 or minutes_difference != 0: 428 if hours_difference == 0 or minutes_difference != 0:
428 difference_string += f"{minutes_difference}m" 429 difference_string += f"{minutes_difference}m"
429 430
430 print(difference_string) 431 clockout_time = None
432 clockout_difference = None
433 if then.is_workday or now.is_workday:
434 target_time = max(then.time_per_day, now.time_per_day) if then.time_per_day and now.time_per_day else (then.time_per_day if then.time_per_day else now.time_per_day);
435 difference = target_time - worked
436 clockout_difference = 5 * ceil(difference / timedelta(minutes = 5))
437 clockout_time = now.now + difference
438 clockout_time += (5 - clockout_time.minute % 5) * timedelta(minutes = 1)
439 clockout_time = clockout_time.replace(second = 0, microsecond = 0)
440
441 if now.running_entry and clockout_time and clockout_difference >= 0:
442 print(f"{difference_string}/{clockout_time:%H:%M}")
443 else:
444 print(difference_string)
431 else: 445 else:
432 print(worked) 446 print(worked)
433 447
@@ -445,7 +459,7 @@ def holidays(year, **args):
445 date_format = config.get('WORKTIME', 'DateFormat', fallback='%Y-%m-%d') 459 date_format = config.get('WORKTIME', 'DateFormat', fallback='%Y-%m-%d')
446 460
447 table_data = [] 461 table_data = []
448 462
449 holidays = Worktime.holidays(year) 463 holidays = Worktime.holidays(year)
450 for k, v in holidays.items(): 464 for k, v in holidays.items():
451 kstr = k.strftime(date_format) 465 kstr = k.strftime(date_format)
@@ -473,7 +487,7 @@ def leave(year, table, **args):
473 break 487 break
474 else: 488 else:
475 print(f'Unaccounted leave: {day}', file=stderr) 489 print(f'Unaccounted leave: {day}', file=stderr)
476 490
477 if table: 491 if table:
478 table_data = [] 492 table_data = []
479 for year, days in leave_budget.items(): 493 for year, days in leave_budget.items():
diff --git a/user-profiles/utils.nix b/user-profiles/utils.nix
index d0d2b2c8..c5042d41 100644
--- a/user-profiles/utils.nix
+++ b/user-profiles/utils.nix
@@ -24,6 +24,7 @@
24 mosh tree vnstat file pv bc fast-cli zip nmap aspell 24 mosh tree vnstat file pv bc fast-cli zip nmap aspell
25 aspellDicts.de aspellDicts.en borgbackup man-pages rsync socat 25 aspellDicts.de aspellDicts.en borgbackup man-pages rsync socat
26 inetutils yq cached-nix-shell persistent-nix-shell rage 26 inetutils yq cached-nix-shell persistent-nix-shell rage
27 smartmontools hdparm
27 ]; 28 ];
28 }; 29 };
29} 30}