Use specific git forgejo CI
Some checks failed
CI / build (push) Failing after 0s

This commit is contained in:
Fabian Hauser 2024-10-03 13:46:37 +03:00
parent 8fed325e09
commit 6bbd0f0157
5 changed files with 260 additions and 60 deletions

View file

@ -223,11 +223,11 @@
]
},
"locked": {
"lastModified": 1727557927,
"narHash": "sha256-+dTv85ZXAatKiCu5VKTQkFE/RmWdlXwkuPvjOmfcPBI=",
"lastModified": 1727954097,
"narHash": "sha256-Fmi1bGcyVLVMpSURwXnGCwWl5K0MVAJHuybDa/vYDis=",
"ref": "refs/heads/main",
"rev": "9a646336c5ad419ec79ae81a47d68213bdcbff92",
"revCount": 5,
"rev": "1d096ecce6a9b722dbdc70515375ec6798958c23",
"revCount": 6,
"type": "git",
"url": "file:./private"
},

View file

@ -2,7 +2,6 @@
{
imports = [
./gitlab-runner.nix
./attic.nix
./nixpkgs-cache.nix
];

View file

@ -1,27 +0,0 @@
{ config, pkgs, ... }:
{
  # GitLab CI runner host configuration (removed in this commit in favour
  # of the Forgejo Actions runner module).
  services.gitlab-runner = {
    enable = true;
    # Give long-running jobs time to finish before the runner stops.
    gracefulTimeout = "20min";
    # Periodically prune docker caches left behind by CI jobs.
    clear-docker-cache = {
      enable = true;
      dates = "monthly";
    };
    services = {
      default = {
        # Also pick up jobs that carry no runner tags.
        runUntagged = true;
        # File should contain at least these two variables:
        # `CI_SERVER_URL`
        # `REGISTRATION_TOKEN`
        registrationConfigFile = config.sops.secrets."gitlab-runner/default-registration".path;
        dockerImage = "debian:stable";
        limit = 42; # The magic value
        maximumTimeout = 7200; # 2h oughta be enough for everyone
      };
    };
  };
}

View file

@ -19,34 +19,262 @@ with lib;
default = "git.qo.is";
description = "Domain, under which the service is served.";
};
};
config = mkIf cfg.enable {
sops.secrets."forgejo/runner-token/${defaultInstanceName}".restartUnits = [
"gitea-runner-${defaultInstanceName}.service"
];
services.gitea-actions-runner = {
package = pkgs.forgejo-runner;
instances.${defaultInstanceName} = {
enable = true;
name = "${config.networking.hostName}-${defaultInstanceName}";
url = "https://${cfg.domain}";
tokenFile = config.sops.secrets."forgejo/runner-token/${defaultInstanceName}".path;
labels = [
"ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
"ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04"
"docker:docker://code.forgejo.org/oci/alpine:3.20"
];
settings = {
log.level = "warn";
runner = {
capacity = 30;
};
cache.enable = true; # TODO: This should probably be a central cache server?
};
};
nixInstances = mkOption {
  # Number of parallel nix runner instances to spawn.
  # This value is passed to `builtins.genList` below, which requires an
  # integer; `types.numbers.positive` would also admit fractional values
  # and break evaluation, so restrict it to positive integers.
  type = types.ints.positive;
  default = 1;
  description = "How many nix runner instances to start";
};
};
config = mkIf cfg.enable (mkMerge [
{
  # Restart the default runner whenever its registration token changes.
  sops.secrets."forgejo/runner-registration-token".restartUnits = [
    "gitea-runner-${defaultInstanceName}.service"
  ];
  # Podman with docker compatibility is the container backend for the
  # runner jobs; auto-pruning keeps CI images from filling the disk.
  virtualisation.podman = {
    enable = true;
    dockerCompat = true;
    dockerSocket.enable = true;
    autoPrune.enable = true;
  };
  services.gitea-actions-runner = {
    package = pkgs.forgejo-runner;
    # Default runner instance registered against the Forgejo server.
    instances.${defaultInstanceName} = {
      enable = true;
      name = "${config.networking.hostName}-${defaultInstanceName}";
      url = "https://${cfg.domain}";
      tokenFile = config.sops.secrets."forgejo/runner-registration-token".path;
      # Container images offered to workflows, keyed by runner label.
      labels = [
        "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
        "ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04"
        "docker:docker://code.forgejo.org/oci/alpine:3.20"
      ];
      settings = {
        log.level = "warn";
        runner = {
          capacity = 30;
        };
        cache.enable = true; # TODO: This should probably be a central cache server?
      };
    };
  };
}
{
  # everything here has no dependencies on the store
  # One-shot unit that builds a minimal "gitea-runner-nix" container image
  # by tarring up a scratch directory (passwd/group entries for an
  # unprivileged nixuser, a nix.conf enabling flakes, and an nsswitch.conf)
  # and importing it into podman.
  systemd.services.gitea-runner-nix-image = {
    wantedBy = [ "multi-user.target" ];
    after = [ "podman.service" ];
    requires = [ "podman.service" ];
    path = [
      config.virtualisation.podman.package
      pkgs.gnutar
      pkgs.shadow
      pkgs.getent
    ];
    # we also include etc here because the cleanup job also wants the nixuser to be present
    script = ''
      set -eux -o pipefail
      mkdir -p etc/nix
      # Create an unpriveleged user that we can use also without the run-as-user.sh script
      touch etc/passwd etc/group
      groupid=$(cut -d: -f3 < <(getent group nixuser))
      userid=$(cut -d: -f3 < <(getent passwd nixuser))
      groupadd --prefix $(pwd) --gid "$groupid" nixuser
      emptypassword='$6$1ero.LwbisiU.h3D$GGmnmECbPotJoPQ5eoSTD6tTjKnSWZcjHoVTkxFLZP17W9hRi/XkmCiAMOfWruUwy8gMjINrBMNODc7cYEo4K.'
      useradd --prefix $(pwd) -p "$emptypassword" -m -d /tmp -u "$userid" -g "$groupid" -G nixuser nixuser
      cat <<NIX_CONFIG > etc/nix/nix.conf
      accept-flake-config = true
      experimental-features = nix-command flakes
      NIX_CONFIG
      cat <<NSSWITCH > etc/nsswitch.conf
      passwd: files mymachines systemd
      group: files mymachines systemd
      shadow: files
      hosts: files mymachines dns myhostname
      networks: files
      ethers: files
      services: files
      protocols: files
      rpc: files
      NSSWITCH
      # list the content as it will be imported into the container
      tar -cv . | tar -tvf -
      tar -cv . | podman import - gitea-runner-nix
    '';
    serviceConfig = {
      # Build inside a private runtime directory; RemainAfterExit lets
      # dependent runner units order themselves after a finished build.
      RuntimeDirectory = "gitea-runner-nix-image";
      WorkingDirectory = "/run/gitea-runner-nix-image";
      Type = "oneshot";
      RemainAfterExit = true;
    };
  };
  # Unprivileged account the nix CI jobs run as inside the container.
  users.users.nixuser = {
    group = "nixuser";
    description = "Used for running nix ci jobs";
    home = "/var/empty";
    isSystemUser = true;
  };
  users.groups.nixuser = { };
}
{
  virtualisation = {
    podman.enable = true;
  };
  # NOTE(review): hard-codes the btrfs storage driver — this assumes
  # /var/lib/containers lives on a btrfs filesystem on every host that
  # imports this module; confirm before reusing elsewhere.
  virtualisation.containers.storage.settings = {
    storage.driver = "btrfs";
    storage.graphroot = "/var/lib/containers/storage";
    storage.runroot = "/run/containers/storage";
  };
  virtualisation.containers.containersConf.settings = {
    # podman seems to not work with systemd-resolved
    containers.dns_servers = [
      "8.8.8.8"
      "8.8.4.4"
    ];
  };
}
{
# Hardened systemd unit overrides for every gitea-runner-nix<N> instance
# (one per cfg.nixInstances); each unit is ordered after the container
# image build so the "gitea-runner-nix" image exists before jobs start.
systemd.services =
  genAttrs (builtins.genList (n: "gitea-runner-nix${builtins.toString n}") cfg.nixInstances)
    (name: {
      after = [
        "gitea-runner-nix-image.service"
      ];
      requires = [
        "gitea-runner-nix-image.service"
      ];
      # TODO: systemd confinment
      serviceConfig = {
        # Hardening (may overlap with DynamicUser=)
        # The following options are only for optimizing output of systemd-analyze
        AmbientCapabilities = "";
        CapabilityBoundingSet = "";
        # ProtectClock= adds DeviceAllow=char-rtc r
        DeviceAllow = "";
        NoNewPrivileges = true;
        PrivateDevices = true;
        PrivateMounts = true;
        PrivateTmp = true;
        PrivateUsers = true;
        ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectSystem = "strict";
        RemoveIPC = true;
        RestrictNamespaces = true;
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
        UMask = "0066";
        ProtectProc = "invisible";
        # Deny-list of syscall groups; entries prefixed "~" are blocked.
        SystemCallFilter = [
          "~@clock"
          "~@cpu-emulation"
          "~@module"
          "~@mount"
          "~@obsolete"
          "~@raw-io"
          "~@reboot"
          "~@swap"
          # needed by go?
          #"~@resources"
          "~@privileged"
          "~capset"
          "~setdomainname"
          "~sethostname"
        ];
        RestrictAddressFamilies = [
          "AF_INET"
          "AF_INET6"
          "AF_UNIX"
          "AF_NETLINK"
        ];
        # Needs network access
        PrivateNetwork = false;
        # Cannot be true due to Node
        MemoryDenyWriteExecute = false;
        # The more restrictive "pid" option makes `nix` commands in CI emit
        # "GC Warning: Couldn't read /proc/stat"
        # You may want to set this to "pid" if not using `nix` commands
        ProcSubset = "all";
        # Coverage programs for compiled code such as `cargo-tarpaulin` disable
        # ASLR (address space layout randomization) which requires the
        # `personality` syscall
        # You may want to set this to `true` if not using coverage tooling on
        # compiled code
        LockPersonality = false;
        # Note that this has some interactions with the User setting; so you may
        # want to consult the systemd docs if using both.
        DynamicUser = true;
      };
    });
# One Forgejo Actions runner registration per nix instance.  Jobs run in
# the locally built "gitea-runner-nix" image with the host /nix store and
# a symlink farm of common tools (storeDeps) bind-mounted into them.
services.gitea-actions-runner.instances =
  let
    # Symlink farm of tool binaries plus CA certificates, mounted into
    # the job containers as /bin and /etc/ssl.
    storeDeps = pkgs.runCommand "store-deps" { } ''
      mkdir -p $out/bin
      for dir in ${
        toString [
          pkgs.coreutils
          pkgs.findutils
          pkgs.gnugrep
          pkgs.gawk
          pkgs.git
          pkgs.nix
          pkgs.bash
          pkgs.jq
          pkgs.nodejs
        ]
      }; do
        for bin in "$dir"/bin/*; do
          ln -s "$bin" "$out/bin/$(basename "$bin")"
        done
      done
      # Add SSL CA certs
      mkdir -p $out/etc/ssl/certs
      cp -a "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" $out/etc/ssl/certs/ca-bundle.crt
    '';
  in
  genAttrs (builtins.genList (n: "nix${builtins.toString n}") cfg.nixInstances) (name: {
    enable = true;
    name = "${config.networking.hostName}-${name}";
    url = "https://${cfg.domain}";
    tokenFile = config.sops.secrets."forgejo/runner-registration-token".path;
    labels = [ "nix:docker://gitea-runner-nix" ];
    settings = {
      # Fix: `--device /dev/kvm` was previously passed twice (once as
      # `--device=/dev/kvm`); pass it only once.
      container.options = "-e NIX_BUILD_SHELL=/bin/bash -e PAGER=cat -e PATH=/bin -e SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt --device /dev/kvm -v /nix:/nix -v ${storeDeps}/bin:/bin -v ${storeDeps}/etc/ssl:/etc/ssl --user nixuser";
      # NOTE(review): "host" bypasses the dns_servers configured in
      # containers.conf for podman's default network — confirm host
      # networking is actually intended here.
      container.network = "host";
      container.valid_volumes = [
        "/nix"
        "${storeDeps}/bin"
        "${storeDeps}/etc/ssl"
      ];
    };
  });
}
]);
}

@ -1 +1 @@
Subproject commit 9a646336c5ad419ec79ae81a47d68213bdcbff92
Subproject commit 1d096ecce6a9b722dbdc70515375ec6798958c23