Apply treefmt

This commit is contained in:
Fabian Hauser 2025-03-25 14:10:54 +02:00
parent b2395ce611
commit b2c240e87f
86 changed files with 374 additions and 456 deletions

View file

@ -1,11 +1,8 @@
name: CI
on:
push:
env:
ATTIC_AUTH_TOKEN: ${{ secrets.ATTIC_AUTH_TOKEN }}
jobs:
build:
runs-on: nix
@ -15,18 +12,14 @@ jobs:
with:
token: ${{ secrets.CI_TOKEN }}
lfs: false
- name: Use attic cache
run: nix run .#cache use
- name: Build
run: |
nix build --max-jobs 12 --cores 12
nix run .#cache push
- name: Run Checks
run: nix flake check
- name: Deploy Docs
if: success() && github.ref == 'refs/heads/main'
run: |
@ -36,4 +29,3 @@ jobs:
# Remote build might be necessary due to non-wheel nix users' signing restrictions.
# However, the build should come from the cache anyway.
nix develop --command deploy --skip-checks --remote-build .#lindberg-webapps.\"docs-ops.qo.is\"

View file

@ -1,18 +1,21 @@
{
"eval": {
"target": {
"args": ["-f", "default.nix"],
"installable": ""
}
},
"formatting": {
"command": "nixfmt"
},
"options": {
"enable": true,
"target": {
"args": [],
"installable": ""
}
"eval": {
"target": {
"args": [
"-f",
"default.nix"
],
"installable": ""
}
},
"formatting": {
"command": "nixfmt"
},
"options": {
"enable": true,
"target": {
"args": [],
"installable": ""
}
}
}

View file

@ -1,5 +1,5 @@
{
"recommendations": [
"jnoortheen.nix-ide"
]
}
"recommendations": [
"jnoortheen.nix-ide"
]
}

View file

@ -6,29 +6,29 @@ Check out the current [rendered documentation](https://docs-ops.qo.is).
## Structure
`nixos-configurations`: Main nixos configuration for every host.
`defaults`: Configuration defaults
`nixos-modules`: Custom modules (e.g. for vpn and routers)
`nixos-configurations`: Main nixos configuration for every host.\
`defaults`: Configuration defaults\
`nixos-modules`: Custom modules (e.g. for vpn and routers)\
`private`: Private configuration values (like users, sops-encrypted secrets and keys)
## Building
This repository requires [nix flakes](https://nixos.wiki/wiki/Flakes)
- `nix build`
- `nix build`\
Build all host configurations and docs
- `nix build .#nixosConfigurations.<hostname>.config.system.build.toplevel`
- `nix build .#nixosConfigurations.<hostname>.config.system.build.toplevel`\
Build a single host configuration
- `nix build .#docs`
- `nix build .#docs`\
Build the documentation website
## Development
- `nix develop`
- `nix develop`\
Development environment
- `nix flake check`
- `nix flake check`\
Execute the project's checks
- `nix fmt`
- `nix fmt`\
Autofix formatting
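Taken together, a typical local iteration might look like this (the commands are the ones listed above; the order is only a suggestion):

```bash
nix develop       # enter the development environment
nix fmt           # autofix formatting (treefmt)
nix flake check   # execute the project's checks
nix build         # build all host configurations and docs
```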
### Working with the private submodule
@ -46,7 +46,7 @@ git add private
nix flake lock --update-input private
```
## Deployment
## Deployment
`nix run .#deploy-qois`

View file

@ -4,7 +4,7 @@
- [Testing](checks/README.md)
- [Deployment](deploy/README.md)
---
______________________________________________________________________
- [Network Topology](defaults/meta/network.md)
- [Hardware (generic)](defaults/hardware/README.md)
@ -12,7 +12,6 @@
- [Updates](updates.md)
- [New Host Setup](nixos-configurations/setup.md)
# Services
- [E-mail](email.md)

View file

@ -1,11 +1,11 @@
# Tests
# Tests
## Module Tests
We test our nixos modules with [NixOS tests](https://nixos.org/manual/nixos/stable/index.html#sec-nixos-tests).
Running nixos tests requires QEMU virtualisation, so make sure you have KVM virtualisation support enabled.
Run all: `nix build .#checks.x86_64-linux.nixos-modules`
Run all: `nix build .#checks.x86_64-linux.nixos-modules`\
Run single test: `nix build .#checks.x86_64-linux.nixos-modules.entries.vm-test-run-testNameAsInDerivationName`
### Run Test Interactively

View file

@ -1,4 +1,4 @@
{ self, pkgs, ... }:
pkgs.linkFarmFromDrvs "all" (
pkgs.lib.mapAttrsToList (n: v: v.config.system.build.toplevel) self.nixosConfigurations
pkgs.lib.mapAttrsToList (_n: v: v.config.system.build.toplevel) self.nixosConfigurations
)

View file

@ -1,4 +1,3 @@
# APU
## Setup
@ -7,5 +6,5 @@ To boot the nixos installer with the console port, add `console=ttyS0,115200n8`
# ASROCK Mainboards
`F2`: Boot into BIOS
`F2`: Boot into BIOS\
`F11`: Select boot device

View file

@ -2,9 +2,7 @@
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:

View file

@ -2,9 +2,7 @@
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:

View file

@ -2,9 +2,7 @@
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:

View file

@ -1,7 +1,5 @@
{
config,
lib,
pkgs,
modulesPath,
...
}:

View file

@ -1,7 +1,4 @@
{
config,
lib,
pkgs,
...
}:
{

View file

@ -1,7 +1,4 @@
{
config,
lib,
pkgs,
...
}:
{

View file

@ -1,7 +1,5 @@
{
config,
lib,
pkgs,
...
}:
{

View file

@ -74,7 +74,6 @@ All Services are published under the *qo.is* domain name. Following services are
## Contacts
### Init7
- [Status Netzwerkdienste](https://www.init7.net/status/)

View file

@ -4,7 +4,6 @@ Note that you have to be connected to the `vpn.qo.is`
(or execute the deployment from a host that is in the `backplane.net.qo.is` overlay network)
and that you need to have SSH root access to the target machines.
## Deploy to selected target hosts
```bash

View file

@ -1,5 +1,4 @@
{
deployPkgs,
pkgs,
self,
...

View file

@ -9,7 +9,6 @@ E-Mail accounts should be created in a `first.lastname@qo.is` fashion.
Alias/forwarding domains may be added on a best-effort basis.
Bills for these domains should go directly to the respective owner (i.e. they should be registered with the owner's own account).
## System E-mails
For groups, systems, services that require e-mail access, other accounts may be created.

View file

@ -46,7 +46,7 @@
inherit system;
overlays = [
deploy-rs.overlay
(self: super: {
(_self: super: {
deploy-rs = {
inherit (pkgs) deploy-rs;
lib = super.deploy-rs.lib;

View file

@ -8,7 +8,7 @@ let
path
;
# Get a list of all subdirectories of a directory.
getSubDirs = base: attrNames (filterAttrs (n: t: t == "directory") (builtins.readDir base));
getSubDirs = base: attrNames (filterAttrs (_n: t: t == "directory") (builtins.readDir base));
# Check if a folder with a base path and folder name contains a file with a specific name
isFolderWithFile =
fileName: basePath: folderName:

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ ... }:
{
imports = [

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ ... }:
{
fileSystems."/" = {

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ config, ... }:
let
meta = config.qois.meta;

View file

@ -1,17 +1,16 @@
# Host: Cyprianspitz
## Operations {#_operations}
## Operations {#\_operations}
Reboot requires passphrase.
``` bash
```bash
# Get HDD Password:
sops decrypt --extract '["system"]["hdd"]' private/nixos-configurations/cyprianspitz/secrets.sops.yaml
ssh -p 8223 root@calanda.plessur-ext.net.qo.is
```
Direct remote ssh access:
```
@ -24,8 +23,6 @@ TODO
- [Mainboard Manual](docs/z790m-itx-wifi.pdf)
### Top Overview
![](docs/top-view.jpg)

View file

@ -1,4 +1,4 @@
{ pkgs, config, ... }:
{ config, ... }:
{
qois.backup-server = {

View file

@ -1,6 +1,4 @@
{
config,
pkgs,
lib,
...
}:

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ ... }:
{
qois.vpn-server.enable = true;
}

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ ... }:
{
imports = [

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ config, ... }:
let
meta = config.qois.meta;

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ pkgs, ... }:
{
virtualisation.libvirtd = {
enable = true;

View file

@ -1,5 +1,4 @@
{
config,
pkgs,
lib,
...

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ ... }:
{
imports = [

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ config, ... }:
{

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ ... }:
{

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ config, ... }:
{
imports = [

View file

@ -2,7 +2,6 @@
## Setting up new static sites
Generate ssh key for deployment:
```bash

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ pkgs, ... }:
{
imports = [ ];

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ ... }:
{
imports = [

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ config, ... }:
{

View file

@ -1,10 +1,10 @@
# Host: Lindberg
## Operations {#_operations}
## Operations {#\_operations}
Reboot requires passphrase (see pass `host/lindberg/hdd_luks`)
``` bash
```bash
ssh -p 2222 root@lindberg.riedbach-ext.net.qo.is
```
@ -12,7 +12,6 @@ ssh -p 2222 root@lindberg.riedbach-ext.net.qo.is
- [Mainboard Manual](docs/X570Pro4-mainboard-manual.pdf)
### Front / Back
#### Front Overview

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ ... }:
{
imports = [ ./loadbalancer.nix ];

View file

@ -1,7 +1,4 @@
{
config,
pkgs,
lib,
...
}:

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ ... }:
{
qois.backup-client.includePaths = [ "/mnt/data" ];

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ ... }:
{
imports = [

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ config, ... }:
let
meta = config.qois.meta;

View file

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ pkgs, ... }:
{
virtualisation.libvirtd = {
enable = true;

View file

@ -3,8 +3,8 @@
## Prepare Remote Machine
1. Boot nixos installer image
2. Set a root password: `sudo passwd root`
3. Get host ip to connect to ssh with `ip a`
1. Set a root password: `sudo passwd root`
1. Get host ip to connect to ssh with `ip a`
## Verify configuration
@ -12,7 +12,7 @@
## Installation
```bash
````bash
nix develop
# Set according to what we want
@ -60,11 +60,11 @@ sops exec-file --no-fifo --filename secret.key private/nixos-configurations/$REM
--disk-encryption-keys /run/secrets/system/hdd.key <(yq --raw-output '.system.hdd' {}) \
--disk-encryption-keys /run/secrets/system/initrd-ssh-key <(yq --raw-output '.system.\"initrd-ssh-key\"' {})
"
```
````
## Post-Setup
* Add backplane-vpn pubkey to `network-virtual.nix` configuration with
- Add backplane-vpn pubkey to `network-virtual.nix` configuration with
```bash
wg pubkey < /secrets/wireguard/private/backplane
```

View file

@ -1,7 +1,7 @@
# Operations {#_operations}
# Operations {#\_operations}
Reboot requires passphrase (see pass `host/stompert/hdd_luks`)
``` bash
```bash
ssh -p 2222 root@stompert.eem-ext.net.qo.is
```

View file

@ -2,7 +2,7 @@
# your system. Help is available in the configuration.nix(5) man page
# and in the NixOS manual (accessible by running nixos-help).
{ config, pkgs, ... }:
{ ... }:
{
imports = [

View file

@ -1,6 +1,5 @@
{
config,
pkgs,
lib,
...
}:

View file

@ -1,6 +1,5 @@
{
config,
pkgs,
lib,
...
}:
@ -35,7 +34,7 @@ in
networking.hosts = pipe cfg.loadbalancers [
(map (hostname: config.qois.meta.network.virtual.backplane.hosts.${hostname}.v4.ip))
(flip genAttrs (lb: cfg.domains))
(flip genAttrs (_lb: cfg.domains))
];
};

View file

@ -1,7 +1,6 @@
{
config,
lib,
pkgs,
...
}:
with lib;

View file

@ -2,8 +2,6 @@
config,
lib,
options,
pkgs,
self,
...
}:

View file

@ -2,8 +2,6 @@
config,
lib,
options,
pkgs,
self,
...
}:

View file

@ -11,7 +11,7 @@ For user documentation, refer to the [upstream Nextcloud docs](https://docs.next
## Backup / Restore
1. Stop all related services: nextcloud, php-fpm, redis etc.
2. (maybe dump redis data?)
3. Import Database Backup
4. Restore `/var/lib/nextcloud`, which is currently a bind mount on `lindberg`'s `/mnt/data` volume
5. Resync nextcloud files and database, see [nextcloud docs](https://docs.nextcloud.com/server/latest/admin_manual/maintenance/restore.html)
1. (maybe dump redis data?)
1. Import Database Backup
1. Restore `/var/lib/nextcloud`, which is currently a bind mount on `lindberg`'s `/mnt/data` volume
1. Resync nextcloud files and database, see [nextcloud docs](https://docs.nextcloud.com/server/latest/admin_manual/maintenance/restore.html)
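A minimal sketch of that sequence on the host, assuming the NixOS unit names `phpfpm-nextcloud.service`, `nextcloud-cron.service` and `redis-nextcloud.service`, a PostgreSQL database called `nextcloud`, and a dump at `/root/restore/nextcloud.sql` (all of these names are assumptions, not taken from the deployed configuration):

```bash
# Stop everything that touches Nextcloud state (unit names assumed)
systemctl stop phpfpm-nextcloud.service nextcloud-cron.service redis-nextcloud.service
# Import the database backup (database name and dump path assumed)
sudo -u postgres psql -d nextcloud -f /root/restore/nextcloud.sql
# Restore the data directory (a bind mount on lindberg's /mnt/data volume)
rsync -a /root/restore/nextcloud-data/ /var/lib/nextcloud/
# Start the services again and resync files with the database
systemctl start redis-nextcloud.service phpfpm-nextcloud.service nextcloud-cron.service
nextcloud-occ maintenance:data-fingerprint  # see the linked Nextcloud restore docs
```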

View file

@ -2,7 +2,6 @@
Runner for the [Forgejo git instance](../git/README.md).
## Default docker/ubuntu Runner
Registers a default runner with an Ubuntu OS or executes the user's OCI container with podman.

View file

@ -146,7 +146,7 @@ with lib;
{
systemd.services =
genAttrs (genList (n: "gitea-runner-nix${builtins.toString n}") cfg.nixInstances)
(name: {
(_name: {
after = [
"gitea-runner-nix-image.service"
];

View file

@ -38,7 +38,6 @@ sudo -u forgejo 'nix run nixpkgs#forgejo -- admin user create --config ~custom/c
## Backup / Restore
1. `systemctl stop forgejo.service`
2. Import Postgresql Database Backup
3. Restore `/var/lib/forgejo`
4. `systemctl start forgejo.service`
1. Import Postgresql Database Backup
1. Restore `/var/lib/forgejo`
1. `systemctl start forgejo.service`
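A hedged sketch of those steps, assuming a PostgreSQL database named `forgejo` and a dump at `/root/restore/forgejo.sql` (both assumptions):

```bash
systemctl stop forgejo.service
# Import the database backup (database name and dump path are assumptions)
sudo -u postgres psql -d forgejo -f /root/restore/forgejo.sql
# Restore the state directory from the backup copy
rsync -a /root/restore/forgejo/ /var/lib/forgejo/
systemctl start forgejo.service
```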

View file

@ -9,7 +9,7 @@ with lib;
let
# We assume that all static pages are hosted on lindberg-webapps
staticPages = pipe config.qois.static-page.pages [
(mapAttrsToList (name: { domain, domainAliases, ... }: [ domain ] ++ domainAliases))
(mapAttrsToList (_name: { domain, domainAliases, ... }: [ domain ] ++ domainAliases))
flatten
(map (name: {
inherit name;

View file

@ -1,7 +1,6 @@
{
config,
lib,
pkgs,
...
}:
@ -83,7 +82,7 @@ in
with lib;
concatLists (
mapAttrsToList (
name: user: if elem "wheel" user.extraGroups then user.openssh.authorizedKeys.keys else [ ]
_name: user: if elem "wheel" user.extraGroups then user.openssh.authorizedKeys.keys else [ ]
) config.users.users
);
hostKeys = [ cfg.sshHostKey ];

View file

@ -1,8 +1,4 @@
{
config,
lib,
pkgs,
options,
...
}:
{

View file

@ -1,7 +1,6 @@
{
config,
lib,
pkgs,
options,
...
}:
@ -43,10 +42,10 @@ in
};
config =
let
hostsWithSshKey = lib.filterAttrs (name: hostCfg: hostCfg.sshKey != null) cfg;
hostsWithSshKey = lib.filterAttrs (_name: hostCfg: hostCfg.sshKey != null) cfg;
in
{
programs.ssh.knownHosts = lib.mapAttrs (name: hostCfg: {
programs.ssh.knownHosts = lib.mapAttrs (_name: hostCfg: {
publicKey = hostCfg.sshKey;
}) hostsWithSshKey;
};

View file

@ -1,7 +1,6 @@
{
config,
lib,
pkgs,
options,
...
}:
@ -17,13 +16,6 @@ let
type = str;
inherit description;
});
mkOptStr =
description:
(mkOption {
type = nullOr str;
default = null;
inherit description;
});
mkNetworkIdOpts =
v:
@ -225,10 +217,10 @@ in
(getHostNamesForNetworks hostname cfg.virtual) ++ (getHostNamesForNetworks hostname cfg.physical);
hostsWithPublicKey = lib.filterAttrs (
hostName: hostConfig: hostConfig.sshKey != null
_hostName: hostConfig: hostConfig.sshKey != null
) config.qois.meta.hosts;
in
mapAttrs (name: hostCfg: { extraHostNames = getHostNames name; }) hostsWithPublicKey;
mapAttrs (name: _hostCfg: { extraHostNames = getHostNames name; }) hostsWithPublicKey;
};
}

View file

@ -1,6 +1,5 @@
{
config,
pkgs,
lib,
...
}:

View file

@ -1,7 +1,6 @@
{
config,
lib,
pkgs,
...
}:

View file

@ -1,7 +1,6 @@
{
config,
lib,
pkgs,
...
}:

View file

@ -1,14 +1,12 @@
{
config,
lib,
pkgs,
...
}:
with lib;
let
routerCfg = config.qois.router;
cfg = config.qois.router.wireless;
in
{

View file

@ -1,4 +1,4 @@
# Router Role {#_router_role}
# Router Role {#\_router_role}
The `router` role set is applied on hosts which serve the role of a SOHO
router.

View file

@ -1,7 +1,6 @@
{
config,
lib,
pkgs,
...
}:

View file

@ -3,4 +3,3 @@
This module enables static nginx sites, with data served from "/var/lib/nginx/$domain/root".
To deploy a site, a user `nginx-$domain` is added; a `root` profile in that user's home folder can then be deployed, e.g. with deploy-rs.

View file

@ -53,7 +53,7 @@ with lib;
config = mkIf cfg.enable (
let
pageConfigs = concatMapAttrs (
name: page:
_name: page:
let
home = "/var/lib/nginx-${page.domain}";
in
@ -76,7 +76,7 @@ with lib;
users = {
groups = concatMapAttrs (
name:
_name:
{ user, ... }:
{
"${user}" = { };
@ -84,10 +84,10 @@ with lib;
) pageConfigs;
users =
{
${config.services.nginx.user}.extraGroups = mapAttrsToList (domain: getAttr "user") pageConfigs;
${config.services.nginx.user}.extraGroups = mapAttrsToList (_domain: getAttr "user") pageConfigs;
}
// (concatMapAttrs (
name:
_name:
{
user,
home,
@ -134,10 +134,10 @@ with lib;
globalRedirect = domain;
};
});
aliasVhosts = concatMapAttrs (name: mkAliasVhost) pageConfigs;
aliasVhosts = concatMapAttrs (_name: mkAliasVhost) pageConfigs;
in
aliasVhosts // (mapAttrs (name: mkVhost) pageConfigs);
aliasVhosts // (mapAttrs (_name: mkVhost) pageConfigs);
};
}
);

View file

@ -12,9 +12,11 @@ def test(subtest, webserver):
# Helpers
def curl_variable_test(node, variable, expected, url):
value = node.succeed(
f"curl -s --no-location -o /dev/null -w '%{{{variable}}}' '{url}'")
assert value == expected, \
f"curl -s --no-location -o /dev/null -w '%{{{variable}}}' '{url}'"
)
assert value == expected, (
f"expected {variable} to be '{expected}' but got '{value}'"
)
def expect_http_code(node, code, url):
curl_variable_test(node, "http_code", code, url)
@ -24,23 +26,21 @@ def test(subtest, webserver):
def expect_http_content(node, expectedContent, url):
content = node.succeed(f"curl --no-location --silent '{url}'")
assert content.strip() == expectedContent.strip(), f'''
assert content.strip() == expectedContent.strip(), f"""
expected content:
{expectedContent}
at {url} but got following content:
{content}
'''
"""
# Tests
with subtest("website is successfully served on localhost"):
expect_http_code(webserver, "200", "http://localhost/index.html")
expect_http_content(webserver, indexContent,
"http://localhost/index.html")
expect_http_content(webserver, indexContent, "http://localhost/index.html")
with subtest("example.com is in hosts file and a redirect to localhost"):
webserver.succeed("grep example.com /etc/hosts")
url = "http://example.com/index.html"
expect_http_code(webserver, "301", url)
expect_http_location(
webserver, "http://localhost/index.html", url)
expect_http_location(webserver, "http://localhost/index.html", url)

View file

@ -1,6 +1,4 @@
{
config,
lib,
pkgs,
...
}:

View file

@ -1,7 +1,4 @@
{
config,
lib,
pkgs,
options,
...
}:

View file

@ -1,7 +1,5 @@
{
config,
lib,
pkgs,
...
}:
with lib;

View file

@ -1,7 +1,6 @@
{
config,
lib,
pkgs,
...
}:

View file

@ -3,7 +3,7 @@
To use our Vaultwarden instance, you can use the regular
[Bitwarden apps](https://bitwarden.com/download/) with our custom server when logging in:
Username: `first.lastname@qo.is`
Username: `first.lastname@qo.is`\
Server Name: `https://vault.qo.is`
## Create Accounts
@ -17,7 +17,6 @@ Please instruct users to:
- the password cannot be reset without losing all the passwords.
Use of [Emergency Contacts](https://bitwarden.com/help/emergency-access/) or Organizations may be advisable.
## Administration
An admin panel is available under [vault.qo.is/admin](https://vault.qo.is/admin).
@ -26,12 +25,10 @@ The password is saved in the pass database under `vaultwarden-admin`.
In the administration panel, users and organizations may be managed.
Instance settings should be changed with the nixos module in the infrastructure repository only.
## Backup / Restore
1. `systemctl stop vaultwarden.service`
2. Import Postgresql Database Backup
3. Restore `/var/lib/bitwarden_rs`
4. `systemctl start vaultwarden.service`
5. Click `Force clients to resync` in the [Administration interface under _Users_](https://vault.qo.is/admin/users/overview)
1. Import Postgresql Database Backup
1. Restore `/var/lib/bitwarden_rs`
1. `systemctl start vaultwarden.service`
1. Click `Force clients to resync` in the [Administration interface under _Users_](https://vault.qo.is/admin/users/overview)
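As a sketch, assuming the PostgreSQL database is named `vaultwarden` and the backup copy lives under `/root/restore` (both assumptions):

```bash
systemctl stop vaultwarden.service
# Import the database backup (database name and dump path are assumptions)
sudo -u postgres psql -d vaultwarden -f /root/restore/vaultwarden.sql
# Restore the application state directory
rsync -a /root/restore/bitwarden_rs/ /var/lib/bitwarden_rs/
systemctl start vaultwarden.service
# Finally, trigger "Force clients to resync" in the admin interface (last step above)
```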

View file

@ -1,6 +1,5 @@
{
config,
pkgs,
lib,
...
}:

View file

@ -29,14 +29,13 @@ These nodes allow access to the internet for clients connected to the VPN:
> ⚠️ Currently, name resolution for these does not work reliably on first starts, hence the IP must be used. This should be fixed in the future.
### Add exit nodes:
1. Create a preauth secret on the `vpn.qo.is` host:
```bash
headscale preauthkeys create --user srv --reusable
```
2. Configure the new exit-node host with the `qois.vpn-exit-node` module.
```
1. Configure the new exit-node host with the `qois.vpn-exit-node` module.
When using the `srv` user, exit nodes and routes are automatically accepted as trusted.
@ -50,11 +49,9 @@ To use the service, you can use a normal Tailscale client with following additio
| `exit-node` | `100.64.0.5` (lindberg) or `100.64.0.6` (cyprianspitz) | Use host as [exit node](#exit-nodes) |
| `login-server` | `https://vpn.qo.is` | Use our own VPN service. |
> ⚠️ Currently, if the client is in an IPv6 network, the transport is broken.
> Disable IPv6 connectivity to use the VPN.
> See [#4](https://git.qo.is/qo.is/infrastructure/issues/4) for details.
> See [#4](https://git.qo.is/qo.is/infrastructure/issues/4) for details.
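For a non-NixOS client, connecting with these options might look like the following (standard Tailscale CLI flags; the exit node is optional and is one of the two addresses listed above):

```bash
# Log in against our Headscale instance and route traffic via lindberg
tailscale up --login-server=https://vpn.qo.is --exit-node=100.64.0.5
```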
### NixOS
@ -90,15 +87,15 @@ See [this Headscale documentation for more](https://headscale.net/stable/usage/c
### Server
1. `systemctl stop headscale`
2. Replace `/var/lib/headscale`
3. `systemctl start headscale`
4. Monitor logs for errors
1. Replace `/var/lib/headscale`
1. `systemctl start headscale`
1. Monitor logs for errors
Note: `/var/lib/headscale` contains a sqlite database.
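As a sketch, assuming the backup copy lives under `/root/restore/headscale` (the path is an assumption):

```bash
systemctl stop headscale.service
# Replace the state directory, which contains the sqlite database
rsync -a --delete /root/restore/headscale/ /var/lib/headscale/
systemctl start headscale.service
journalctl -fu headscale.service  # monitor logs for errors
```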
### Clients
1. `systemctl stop tailscaled`
2. Replace `/var/lib/tailscale`
3. `systemctl start tailscaled`
4. Monitor logs for errors
1. Replace `/var/lib/tailscale`
1. `systemctl start tailscaled`
1. Monitor logs for errors

View file

@ -10,7 +10,7 @@ let
cfgLoadbalancer = config.qois.loadbalancer;
defaultDnsRecords =
(mapAttrs (
name: value: mkIf (cfgLoadbalancer.hostmap ? ${value}) cfgLoadbalancer.hostmap.${value}
_name: value: mkIf (cfgLoadbalancer.hostmap ? ${value}) cfgLoadbalancer.hostmap.${value}
) cfgLoadbalancer.domains)
// {
"vpn.qo.is" = config.services.headscale.address;

View file

@ -1,8 +1,8 @@
# WWAN Module {#_wwan_module}
# WWAN Module {#\_wwan_module}
This module configures WWAN adapters that support MBIM.
## Current limitations {#_current_limitations}
## Current limitations {#\_current_limitations}
- IPv4 tested only
- Currently, it is not simple to get network failures or address

View file

@ -6,11 +6,11 @@ MODE=$1
DEV=$2
if [ "$DEBUG" == "" ]; then
DEBUG="false"
DEBUG="false"
fi
if [ "$MBIM_INTERFACE" == "" ]; then
MBIM_INTERFACE="/dev/cdc-wdm0"
MBIM_INTERFACE="/dev/cdc-wdm0"
fi
###############################################################################
@ -29,274 +29,273 @@ ipv6_dns=()
ipv6_mtu=""
export previous_state state skip_line \
ipv4_addresses ipv4_gateway ipv4_dns ipv4_mtu \
ipv6_addresses ipv6_gateway ipv6_dns ipv6_mtu
ipv4_addresses ipv4_gateway ipv4_dns ipv4_mtu \
ipv6_addresses ipv6_gateway ipv6_dns ipv6_mtu
###############################################################################
# Function
###############################################################################
function print_debug {
if [ "$DEBUG" != "false" ]; then
echo "[State: $state] $1" >&2
fi
if [ "$DEBUG" != "false" ]; then
echo "[State: $state] $1" >&2
fi
}
function print_full_configuration {
if [[ "${#ipv4_addresses[@]}" > 0 ]]; then
printf "IPv4: "
printf '%s, ' "${ipv4_addresses[@]}"
printf "\n"
if [[ ${#ipv4_addresses[@]} > 0 ]]; then
printf "IPv4: "
printf '%s, ' "${ipv4_addresses[@]}"
printf "\n"
printf "GW: $ipv4_gateway\n"
printf "GW: $ipv4_gateway\n"
printf "DNS: "
printf '%s, ' "${ipv4_dns[@]}"
printf "\n"
printf "DNS: "
printf '%s, ' "${ipv4_dns[@]}"
printf "\n"
printf "MTU: $ipv4_mtu\n"
fi
printf "MTU: $ipv4_mtu\n"
fi
if [[ "${#ipv6_addresses[@]}" > 0 ]]; then
echo
printf "IPv6: "
printf '%s, ' "${ipv6_addresses[@]}"
printf "\n"
if [[ ${#ipv6_addresses[@]} > 0 ]]; then
echo
printf "IPv6: "
printf '%s, ' "${ipv6_addresses[@]}"
printf "\n"
printf "GW: $ipv6_gateway\n"
printf "GW: $ipv6_gateway\n"
printf "DNS: "
printf '%s, ' "${ipv6_dns[@]}"
printf "\n"
printf "DNS: "
printf '%s, ' "${ipv6_dns[@]}"
printf "\n"
printf "MTU: $ipv6_mtu\n"
fi
printf "MTU: $ipv6_mtu\n"
fi
}
function next_state {
previous_state="$state"
state="$1"
previous_state="$state"
state="$1"
}
function parse_ip {
# IP [0]: '10.134.203.177/30'
local line_re="IP \[([0-9]+)\]: '(.+)'"
local input=$1
if [[ $input =~ $line_re ]]; then
local ip_cnt=${BASH_REMATCH[1]}
local ip=${BASH_REMATCH[2]}
fi
echo "$ip"
# IP [0]: '10.134.203.177/30'
local line_re="IP \[([0-9]+)\]: '(.+)'"
local input=$1
if [[ $input =~ $line_re ]]; then
local ip_cnt=${BASH_REMATCH[1]}
local ip=${BASH_REMATCH[2]}
fi
echo "$ip"
}
function parse_dns {
# IP [0]: '10.134.203.177/30'
local line_re="DNS \[([0-9]+)\]: '(.+)'"
local input=$1
if [[ $input =~ $line_re ]]; then
local dns_cnt=${BASH_REMATCH[1]}
local dns=${BASH_REMATCH[2]}
fi
echo "$dns"
# IP [0]: '10.134.203.177/30'
local line_re="DNS \[([0-9]+)\]: '(.+)'"
local input=$1
if [[ $input =~ $line_re ]]; then
local dns_cnt=${BASH_REMATCH[1]}
local dns=${BASH_REMATCH[2]}
fi
echo "$dns"
}
function parse_gateway {
# Gateway: '10.134.203.178'
local line_re="Gateway: '(.+)'"
local input=$1
if [[ $input =~ $line_re ]]; then
local gw=${BASH_REMATCH[1]}
fi
echo "$gw"
# Gateway: '10.134.203.178'
local line_re="Gateway: '(.+)'"
local input=$1
if [[ $input =~ $line_re ]]; then
local gw=${BASH_REMATCH[1]}
fi
echo "$gw"
}
function parse_mtu {
# MTU: '1500'
local line_re="MTU: '([0-9]+)'"
local input=$1
if [[ $input =~ $line_re ]]; then
local mtu=${BASH_REMATCH[1]}
fi
echo "$mtu"
# MTU: '1500'
local line_re="MTU: '([0-9]+)'"
local input=$1
if [[ $input =~ $line_re ]]; then
local mtu=${BASH_REMATCH[1]}
fi
echo "$mtu"
}
function parse_input_state_machine {
state="start"
while true; do
if [[ "$skip_line" == 0 ]]; then
read line || break # TODO: Clean up
else
skip_line=0
fi
case "$state" in
"start")
read line || break # first line is empty, read a new one #TODO: This is not very clean...
case "$line" in
*"configuration available: 'none'"*)
# Skip none state
# TODO: This is a workaround for the original parser's shortcoming
continue
;;
*"IPv4 configuration available"*)
next_state "ipv4_ip"
continue
;;
*"IPv6 configuration available"*)
next_state "ipv6_ip"
continue
;;
*)
next_state "exit"
continue
;;
esac
;;
"error")
echo "Error in pattern matchin of state $previous_state. Exiting." >&2
exit 2
;;
"exit")
break
;;
"ipv4_ip")
ipv4=$(parse_ip "$line")
if [ -z "$ipv4" ]; then
if [[ "${#ipv4_addresses[@]}" < 1 ]]; then
next_state "error"
continue
else
next_state "ipv4_gateway"
skip_line=1
continue
fi
fi
print_debug "$ipv4"
ipv4_addresses+=("$ipv4")
;;
"ipv4_gateway")
gw=$(parse_gateway "$line")
if [ -z "$gw" ]; then
next_state "error"
continue
fi
print_debug "$gw"
ipv4_gateway="$gw"
next_state "ipv4_dns"
;;
"ipv4_dns")
ipv4=$(parse_dns "$line")
if [ -z "$ipv4" ]; then
if [[ "${#ipv4_dns[@]}" < 1 ]]; then
next_state "error"
continue
else
next_state "ipv4_mtu"
skip_line=1
continue
fi
fi
print_debug "$ipv4"
ipv4_dns+=("$ipv4")
;;
"ipv4_mtu")
mtu=$(parse_mtu "$line")
if [ -z "$mtu" ]; then
next_state "error"
continue
fi
print_debug "$mtu"
ipv4_mtu="$mtu"
next_state "start"
;;
"ipv6_ip")
ipv6=$(parse_ip "$line")
if [ -z "$ipv6" ]; then
if [[ "${#ipv6_addresses[@]}" < 1 ]]; then
next_state "error"
continue
else
next_state "ipv6_gateway"
skip_line=1
continue
fi
fi
print_debug "$ipv6"
ipv6_addresses+=("$ipv6")
;;
"ipv6_gateway")
gw=$(parse_gateway "$line")
if [ -z "$gw" ]; then
next_state "error"
continue
fi
print_debug "$gw"
ipv6_gateway="$gw"
next_state "ipv6_dns"
;;
"ipv6_dns")
ipv6=$(parse_dns "$line")
if [ -z "$ipv6" ]; then
if [[ "${#ipv6_dns[@]}" < 1 ]]; then
next_state "error"
continue
else
next_state "ipv6_mtu"
skip_line=1
continue
fi
fi
print_debug "$ipv6"
ipv6_dns+=("$ipv6")
;;
"ipv6_mtu")
mtu=$(parse_mtu "$line")
if [ -z "$mtu" ]; then
next_state "error"
continue
fi
print_debug "$mtu"
ipv6_mtu="$mtu"
next_state "start"
;;
*)
print_debug "Invalid state (came from $previous_state). Exiting."
exit 0
;;
esac
done
state="start"
while true; do
if [[ $skip_line == 0 ]]; then
read line || break # TODO: Clean up
else
skip_line=0
fi
case "$state" in
"start")
read line || break # first line is empty, read a new one #TODO: This is not very clean...
case "$line" in
*"configuration available: 'none'"*)
# Skip none state
# TODO: This is a workaround for the original parser's shortcoming
continue
;;
*"IPv4 configuration available"*)
next_state "ipv4_ip"
continue
;;
*"IPv6 configuration available"*)
next_state "ipv6_ip"
continue
;;
*)
next_state "exit"
continue
;;
esac
;;
"error")
echo "Error in pattern matchin of state $previous_state. Exiting." >&2
exit 2
;;
"exit")
break
;;
"ipv4_ip")
ipv4=$(parse_ip "$line")
if [ -z "$ipv4" ]; then
if [[ ${#ipv4_addresses[@]} < 1 ]]; then
next_state "error"
continue
else
next_state "ipv4_gateway"
skip_line=1
continue
fi
fi
print_debug "$ipv4"
ipv4_addresses+=("$ipv4")
;;
"ipv4_gateway")
gw=$(parse_gateway "$line")
if [ -z "$gw" ]; then
next_state "error"
continue
fi
print_debug "$gw"
ipv4_gateway="$gw"
next_state "ipv4_dns"
;;
"ipv4_dns")
ipv4=$(parse_dns "$line")
if [ -z "$ipv4" ]; then
if [[ ${#ipv4_dns[@]} < 1 ]]; then
next_state "error"
continue
else
next_state "ipv4_mtu"
skip_line=1
continue
fi
fi
print_debug "$ipv4"
ipv4_dns+=("$ipv4")
;;
"ipv4_mtu")
mtu=$(parse_mtu "$line")
if [ -z "$mtu" ]; then
next_state "error"
continue
fi
print_debug "$mtu"
ipv4_mtu="$mtu"
next_state "start"
;;
"ipv6_ip")
ipv6=$(parse_ip "$line")
if [ -z "$ipv6" ]; then
if [[ ${#ipv6_addresses[@]} < 1 ]]; then
next_state "error"
continue
else
next_state "ipv6_gateway"
skip_line=1
continue
fi
fi
print_debug "$ipv6"
ipv6_addresses+=("$ipv6")
;;
"ipv6_gateway")
gw=$(parse_gateway "$line")
if [ -z "$gw" ]; then
next_state "error"
continue
fi
print_debug "$gw"
ipv6_gateway="$gw"
next_state "ipv6_dns"
;;
"ipv6_dns")
ipv6=$(parse_dns "$line")
if [ -z "$ipv6" ]; then
if [[ ${#ipv6_dns[@]} < 1 ]]; then
next_state "error"
continue
else
next_state "ipv6_mtu"
skip_line=1
continue
fi
fi
print_debug "$ipv6"
ipv6_dns+=("$ipv6")
;;
"ipv6_mtu")
mtu=$(parse_mtu "$line")
if [ -z "$mtu" ]; then
next_state "error"
continue
fi
print_debug "$mtu"
ipv6_mtu="$mtu"
next_state "start"
;;
*)
print_debug "Invalid state (came from $previous_state). Exiting."
exit 0
;;
esac
done
}
interface_stop() {
ip addr flush dev $DEV
ip route flush dev $DEV
interface_stop(){
ip addr flush dev $DEV
ip route flush dev $DEV
ip -6 addr flush dev $DEV
ip -6 route flush dev $DEV
ip -6 addr flush dev $DEV
ip -6 route flush dev $DEV
#TODO: Nameserver?
#TODO: Nameserver?
}
interface_start() {
ip link set $DEV up
ip link set $DEV up
if [[ "${#ipv4_addresses[@]}" > 0 ]]; then
ip addr add ${ipv4_addresses[@]} dev $DEV broadcast + #TODO: Works for multiple addresses?
ip link set $DEV mtu $ipv4_mtu
ip route add default via $ipv4_gateway dev $DEV
#TODO: nameserver ${ipv4_dns[@]}
else
echo "No IPv4 address, skipping v4 configuration..."
fi
if [[ ${#ipv4_addresses[@]} > 0 ]]; then
ip addr add ${ipv4_addresses[@]} dev $DEV broadcast + #TODO: Works for multiple addresses?
ip link set $DEV mtu $ipv4_mtu
ip route add default via $ipv4_gateway dev $DEV
#TODO: nameserver ${ipv4_dns[@]}
else
echo "No IPv4 address, skipping v4 configuration..."
fi
if [[ "${#ipv6_addresses[@]}" > 0 ]]; then
ip -6 addr add ${ipv6_addresses[@]} dev $DEV #TODO: Works for multiple addresses?
ip -6 route add default via $ipv6_gateway dev $DEV
ip -6 link set $DEV mtu $ipv6_mtu
#TODO: nameserver ${ipv6_dns[@]}"
else
echo "No IPv6 address, skipping v6 configuration..."
fi
if [[ ${#ipv6_addresses[@]} > 0 ]]; then
ip -6 addr add ${ipv6_addresses[@]} dev $DEV #TODO: Works for multiple addresses?
ip -6 route add default via $ipv6_gateway dev $DEV
ip -6 link set $DEV mtu $ipv6_mtu
#TODO: nameserver ${ipv6_dns[@]}"
else
echo "No IPv6 address, skipping v6 configuration..."
fi
}
###############################################################################
@ -307,23 +306,23 @@ set -e
echo "NOTE: This script does not yet support nameserver configuration."
case "$MODE" in
"start")
mbim-network $MBIM_INTERFACE start
sleep 1
mbimcli -d $MBIM_INTERFACE -p --query-ip-configuration=0 | {
parse_input_state_machine
print_full_configuration
interface_stop
interface_start
}
;;
"stop")
mbim-network $MBIM_INTERFACE stop
interface_stop
;;
*)
echo "USAGE: $0 start|stop INTERFACE" >&2
echo "You can set an env variable DEBUG to gather debugging output." >&2
exit 1
;;
"start")
mbim-network $MBIM_INTERFACE start
sleep 1
mbimcli -d $MBIM_INTERFACE -p --query-ip-configuration=0 | {
parse_input_state_machine
print_full_configuration
interface_stop
interface_start
}
;;
"stop")
mbim-network $MBIM_INTERFACE stop
interface_stop
;;
*)
echo "USAGE: $0 start|stop INTERFACE" >&2
echo "You can set an env variable DEBUG to gather debugging output." >&2
exit 1
;;
esac
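For reference, an invocation of the script might look like this (the script file name is hypothetical; the mode and interface arguments follow the usage message above):

```bash
# Bring the WWAN link up on wwan0 with debug output enabled
DEBUG=true MBIM_INTERFACE=/dev/cdc-wdm0 ./wwan-mbim.sh start wwan0
```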

View file

@ -41,9 +41,9 @@ let
userAgeKeys = [ ];
serverAgeKeys =
let
getHostsWithSshKeys = filterAttrs (name: cfg: cfg ? sshKey);
getHostsWithSshKeys = filterAttrs (_name: cfg: cfg ? sshKey);
mapHostToAgeKey = mapAttrs (
name: cfg:
_name: cfg:
readFile (
runCommand "sshToAgeKey"
{

View file

@ -1,13 +1,16 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [ "config:recommended" ],
"extends": [
"config:recommended"
],
"lockFileMaintenance": {
"enabled": true,
"extends": [ "schedule:weekly" ]
"extends": [
"schedule:weekly"
]
},
"cloneSubmodules": true,
"nix": {
"enabled": true
}
}

View file

@ -16,7 +16,7 @@ git commit
git push
```
Deploy updates:
Deploy updates:
```bash
nix develop
@ -47,11 +47,10 @@ pssh -l root -H lindberg-nextcloud.backplane.net.qo.is -H lindberg-build.backpla
## Application Updates
Some applications have pinned versions to prevent problems due to accidental upgrades.
Some applications have pinned versions to prevent problems due to accidental upgrades.\
The version switch has to be done manually by changing the package used.
This includes the modules for:
- `nextcloud`
- `postgresql`, [&rarr; Nixpkgs manual page](https://nixos.org/manual/nixos/stable/#module-services-postgres-upgrading)
- `postgresql`, [→ Nixpkgs manual page](https://nixos.org/manual/nixos/stable/#module-services-postgres-upgrading)