move around some stuff

xunuwu 2024-11-23 17:41:01 +01:00
parent 6ab8a4e38d
commit ef537aa980
Signed by: xun
SSH key fingerprint: SHA256:Uot/1WoAjWAeqLOHA5vYy4phhVydsH7jCPmBjaPZfgI
31 changed files with 126 additions and 308 deletions

@@ -1,192 +0,0 @@
{
self,
inputs,
homeImports,
lib,
pkgs,
config,
...
}: let
specialArgs = {
inherit inputs self;
};
source = inputs.haumea.lib.load {
inputs = {inherit inputs lib;};
src = "${self}/nix";
};
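  # haumea loads the ${self}/nix tree into an attrset, so systemProfiles mirrors the nix/systemProfiles directory on disk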
systemProfiles = source.systemProfiles;
in {
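  # hosts below are deployed with colmena, e.g. "colmena apply --on nixdesk"; hosts with allowLocalDeployment can use "colmena apply-local" instead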
flake.colmena = {
meta = {
nixpkgs = import inputs.nixpkgs {
system = "x86_64-linux";
};
inherit specialArgs;
};
kidney = {
deployment = {
allowLocalDeployment = true;
};
imports = lib.flatten [
./kidney
(with systemProfiles; [
core.tools
core.users
core.locale
programs.tools
programs.zsh
programs.home-manager
hardware.graphics
services.flatpak
services.xdg-portals
nix.default
nix.gc
])
{
home-manager = {
users.xun.imports = homeImports."xun@kidney";
extraSpecialArgs = specialArgs;
};
}
];
};
nixdesk = {
deployment = {
allowLocalDeployment = true;
targetUser = "xun";
targetHost = "nixdesk.local";
};
imports = lib.flatten [
./nixdesk
inputs.stylix.nixosModules.stylix
(with systemProfiles; [
secrets.default
secrets.nixdesk.default
core.security
core.users
core.ssh
core.locale
nix.default
programs.zsh
core.tools
core.compat
core.boot
core.docs
core.gvfs
nix.gc
hardware.graphics
hardware.steam-hardware
hardware.bluetooth
hardware.qmk
network.networkd
network.avahi
network.localsend
network.tailscale
network.goldberg
desktop.ly
desktop.awesome
desktop.sway
          # desktop.hyprland
programs.dconf
programs.fonts
programs.home-manager
# programs.qt
programs.adb
programs.tools
programs.thunar
services.default
services.pipewire
services.flatpak
services.syncthing
services.virt.waydroid
services.virt.virt-manager
#network.wifi
#services.ollama
desktop.x11.nosleep
themes.dark
# programs.gamemode # TEMP: TODO
# programs.gamescope # TEMP: TODO
# programs.steam # TEMP: TODO
programs.RE.default
])
{
home-manager = {
backupFileExtension = "hm-backup";
users.xun.imports = homeImports."xun@nixdesk";
extraSpecialArgs = specialArgs;
};
}
];
};
hopper = {
deployment = {
targetUser = "xun";
targetHost = "hopper.local";
};
imports = lib.flatten [
./hopper
(with systemProfiles; [
secrets.default
secrets.hopper.default
core.security
core.locale
core.tools
core.ssh
core.deploy
nix.default # TODO slim this down
network.tailscale
network.avahi
network.networkd
# services.syncthing # TODO make syncthing not rely on having "xun" user
])
];
};
liveiso = {
deployment.targetHost = null;
imports = lib.flatten [
./liveiso
(with systemProfiles; [
nix.default
core.security
services.default
])
];
};
};
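  # expose the same hosts as plain nixosConfigurations (minus colmena's meta) so they also build with regular nix tooling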
flake.nixosConfigurations = let
l = inputs.nixpkgs.lib;
in
builtins.mapAttrs (_: v:
l.nixosSystem {
inherit specialArgs;
modules = v.imports;
}) (l.filterAttrs (n: _: n != "meta") self.colmena);
}

@@ -1,150 +0,0 @@
{
pkgs,
lib,
config,
...
}: {
networking.firewall.allowedTCPPorts = [
# 4444
];
systemd.services."static-web-server".after = ["brawlstats.timer"];
services.static-web-server = {
enable = true;
root = "/var/lib/brawlstats";
listen = "[::]:3434";
};
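  # inetd-style socket activation: Accept=yes spawns one brawlstats-web@ instance per TCP connection on port 4444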
systemd.sockets."brawlstats-web" = {
wantedBy = ["sockets.target"];
socketConfig = {
ListenStream = "4444";
TriggerLimitIntervalSec = 0;
Accept = "yes";
};
};
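  # each instance gets the connection on stdin/stdout (StandardInput=socket), so the script acts as a tiny one-shot HTTP responder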
systemd.services."brawlstats-web@" = {
serviceConfig = {
StandardInput = "socket";
ExecStart = "${pkgs.writeShellScript "brawlstats-web.sh" ''
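        # request path from e.g. "GET /total,VLJY22GY HTTP/1.1", with commas turned into spaces: "/total VLJY22GY"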
parameters=$(head -n1 | ${lib.getExe pkgs.gawk} '{print $2}' | ${lib.getExe pkgs.gnused} 's/,/ /g')
response=""
tosvg() {
${lib.getExe pkgs.gnuplot} -c ${pkgs.writeText "gnuplotcmds" ''
set xdata time
set timefmt '%Y%m%dT%H%M%S.000Z'
set format x '%m/%d-%H:%M'
set xlabel 'Time'
set ylabel 'Trophies'
set term svg
plot "/dev/stdin" u 1:2 w lines notitle
''}
}
        rm -f /tmp/brawlstatslog
case ''${parameters:1} in
total*)
id=$(echo $parameters | ${lib.getExe pkgs.gawk} '{print $2}')
trophies=$(cat "/var/lib/brawlstats/$id-player.json" | ${lib.getExe pkgs.jq} '.trophies')
response=$(${lib.getExe pkgs.jq} -r \
"sort_by(.battleTime)
| reverse | .[]
| .battleTime, .battle.trophyChange" "/var/lib/brawlstats/$id-log.json" \
| paste - - \
| ${lib.getExe pkgs.gawk} -v total=$trophies '{total -= $2; $2 = total}2' \
| tosvg)
;;
brawler*)
id=$(echo $parameters | ${lib.getExe pkgs.gawk} '{print $2}')
brawler=$(echo $parameters | ${lib.getExe pkgs.gawk} '{print $3}' | ${lib.getExe pkgs.gnused} 's/%20/ /g')
response=$(${lib.getExe pkgs.jq} -r \
"sort_by(.battleTime)
| reverse
| map (select (.. | .tag? == \"#$id\" and .brawler.name == \"$brawler\")).[]
| select (.battle.type == \"ranked\")
| .battleTime,
(.battle | (.teams[]?,.players) | select(.)[] | select(.tag == \"#$id\") | .brawler.trophies) + .battle.trophyChange" "/var/lib/brawlstats/$id-log.json" \
| paste - - \
| tosvg)
;;
*)
response="parameters: $parameters | firstparam: $(echo "$parameters" | ${lib.getExe pkgs.gawk} '{print $1}')"
;;
esac
echo -e "HTTP/1.1 200 OK\r\nContent-Length: $(echo "$response" | wc -c)\r\nContent-Type: text/html\r\n\r\n$response"
''}";
};
};
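  # refresh the stats every 30 minutes; the socket-activated handler above only reads what this service writes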
systemd.timers."brawlstats" = {
wantedBy = ["timers.target"];
timerConfig = {
OnCalendar = "*:0/30";
Unit = "brawlstats.service";
};
};
systemd.services."brawlstats" = {
serviceConfig = {
Type = "oneshot";
User = "root";
StateDirectory = "brawlstats";
PrivateTmp = true;
LoadCredential = "apitoken:${config.sops.secrets.brawlstars-api-key.path}";
Environment = "TOKEN=%d/apitoken";
ExecStart = pkgs.writers.writeBash "brawlstats.sh" ''
TOKEN=$(cat $TOKEN)
cd "$STATE_DIRECTORY"
ids=("VLJY22GY" "VLJV2CYL")
for id in ''${ids[@]}; do
echo "id: $id"
sleep 1
battlelogout=$(mktemp)
${lib.getExe pkgs.curl} -H "Authorization: Bearer $TOKEN" "https://api.brawlstars.com/v1/players/%23$id/battlelog" | ${lib.getExe pkgs.jq} '[.items[]]' > "$battlelogout"
sleep 1
${lib.getExe pkgs.curl} -H "Authorization: Bearer $TOKEN" "https://api.brawlstars.com/v1/players/%23$id" > "$id-player.json"
if [ ! -s "$battlelogout" ]; then
echo "battlelogout is empty"
rm "$battlelogout"
continue
fi
if [ ! -s "$id-player.json" ]; then
echo "$id-player.json is empty"
continue
fi
tmplog=$(mktemp)
cat "$battlelogout" "$id-log.json" | ${lib.getExe pkgs.jq} -s 'add | unique' > "$tmplog"
cat "$tmplog" > "$id-log.json"
rm -f "$tmplog"
rm -f "$battlelogout"
# create backup
cp "$id-log.json" "$id-log-$(date +'%s').json"
# remove old backups
find . -type f -name "$id-log-*.json" | sort | head -n -5 | xargs -r rm
done
'';
};
};
}

@@ -1,21 +0,0 @@
{inputs, ...}: {
imports = with inputs.hardware.nixosModules; [
common-cpu-intel
inputs.vpn-confinement.nixosModules.default
./hardware.nix
./newlab.nix
# ./brawlstats.nix
# ./lab.nix
# ./hardening.nix
];
networking.hostName = "hopper";
swapDevices = [];
networking.interfaces.eno1.wakeOnLan.enable = true;
system.stateVersion = "23.11";
}

@@ -1,5 +0,0 @@
{
fileSystems."/".options = ["noexec"];
fileSystems."/home".options = ["noexec"];
fileSystems."/boot".options = ["noexec"];
}

@@ -1,58 +0,0 @@
{config, ...}: {
nixpkgs.hostPlatform.system = "x86_64-linux";
## nvidia gpu
#services.xserver.videoDrivers = ["nvidia"];
#hardware.nvidia = {
# modesetting.enable = true;
# package = config.boot.kernelPackages.nvidiaPackages.stable;
#};
boot = {
blacklistedKernelModules = [
# "xhci_pci" # was causing issues (100% udevd cpu usage)
];
initrd = {
availableKernelModules = [
"ehci_pci"
"ahci"
"usb_storage"
"usbhid"
"sd_mod"
];
kernelModules = [];
};
kernelModules = ["kvm-intel" "wireguard"];
extraModulePackages = [];
loader = {
systemd-boot = {
enable = true;
configurationLimit = 10;
};
efi.canTouchEfiVariables = true;
};
};
fileSystems = {
"/" = {
device = "/dev/disk/by-uuid/1297e638-f2ff-49a2-a362-314ac7eeaabc";
fsType = "btrfs";
options = ["subvol=root" "compress=zstd" "autodefrag" "noatime"];
};
"/home" = {
device = "/dev/disk/by-uuid/1297e638-f2ff-49a2-a362-314ac7eeaabc";
fsType = "btrfs";
options = ["subvol=home" "compress=zstd"];
};
"/nix" = {
device = "/dev/disk/by-uuid/1297e638-f2ff-49a2-a362-314ac7eeaabc";
fsType = "btrfs";
options = ["subvol=nix" "compress=zstd" "noatime"];
};
"/boot" = {
device = "/dev/disk/by-uuid/8D4C-2F05";
fsType = "vfat";
};
};
}

@@ -1,305 +0,0 @@
## TODO look into sops-nix placeholders
## reference: https://github.com/javigomezo/nixos/blob/b3ebe8d570ea9b37aea8bb3a343f6e16e054e322/services/network/authelia/user_database.nix
{
pkgs,
inputs,
config,
lib,
...
}: let
domain = "xunuwu.xyz";
caddyPort = 8336;
autheliaPort = 24637;
in {
## TODO use impermanence
## TODO setup fail2ban mayb
imports = [inputs.vpn-confinement.nixosModules.default];
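  # wildcard cert for *.xunuwu.xyz via DNS-01 on Cloudflare; caddy reloads whenever it renews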
security.acme = {
acceptTerms = true;
certs.${domain} = {
domain = "*.${domain}";
dnsProvider = "cloudflare";
email = "xunuwu@gmail.com";
reloadServices = ["caddy.service"];
credentialFiles.CF_DNS_API_TOKEN_FILE = config.sops.secrets.cloudflare.path;
extraDomainNames = [domain];
};
};
vpnNamespaces."wg" = {
enable = true;
wireguardConfigFile = config.sops.secrets.wireguard-config.path;
accessibleFrom = [
"192.168.0.0/24"
];
    # Forwarded through the VPN provider, so these ports are reachable from outside
openVPNPorts = [
{
port = caddyPort;
protocol = "tcp";
}
];
    # Mapped from inside the VPN namespace to the host, so services inside are reachable from the LAN
portMappings = [
{
to = caddyPort;
from = caddyPort;
}
{
to = 7359; # Jellyfin auto-discovery
from = 7359;
}
{
        to = 1900; # Jellyfin auto-discovery, TODO check if this actually works and don't forward these if it doesn't
from = 1900;
}
];
};
networking.firewall = {
allowedTCPPorts = [config.services.navidrome.settings.Port];
allowedUDPPorts = [1900 7359]; # Jellyfin auto-discovery
};
systemd.services.caddy.vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
services.caddy = {
enable = true;
# extraConfig = let
# gensub = x: "${x}.${domain}:${toString caddyPort}";
# tls = "tls /var/lib/acme/${domain}/cert.pem /var/lib/acme/${domain}/key.pem";
# rpPort = port: "reverse_proxy localhost:${toString port}";
# in ''
# ${gensub "navidrome"} {
# ${tls}
# ${rpPort config.services.navidrome.settings.Port}
# }
# '';
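    # every attrset below becomes <name>.xunuwu.xyz on the VPN-forwarded port; per-host values override these defaults through the // merge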
virtualHosts = let
authelia = "localhost:${toString autheliaPort}";
in
builtins.mapAttrs (n: v:
{
useACMEHost = domain;
hostName = "${n}.${domain}:${toString caddyPort}";
}
// v) {
navidrome.extraConfig = ''
reverse_proxy localhost:${toString config.services.navidrome.settings.Port}
'';
auth.extraConfig = "reverse_proxy ${authelia}";
      #jellyfin.extraConfig = "reverse_proxy localhost:8096"; # TODO temporarily off since I don't have proper auth yet
other = {
hostName = ":${toString caddyPort}";
extraConfig = ''
respond 404 {
body "no such route you dummy"
}
'';
};
};
};
systemd.services.navidrome = {
vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
serviceConfig = {
PrivateTmp = true;
NoNewPrivileges = true;
RestrictSUIDSGID = true;
ProtectProc = "invisible";
};
};
## TODO might be unnecessary with authelia but specifying a custom PasswordEncryptionKey is recommended
services.navidrome = {
enable = true;
settings = {
Address = "localhost";
MusicFolder = "/media/library/music";
      ReverseProxyWhitelist = "0.0.0.0/0"; # can't be reached from outside anyway, since the navidrome port isn't mapped out of the wireguard namespace
};
};
systemd.services.authelia-main = {
vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
# serviceConfig.LoadCredential = [
# "users.yaml:${}"
# ];
};
services.authelia.instances.main = {
enable = true;
secrets = {
jwtSecretFile = config.sops.secrets.authelia_jwt_secret.path;
storageEncryptionKeyFile = config.sops.secrets.authelia_encryption_key.path;
sessionSecretFile = config.sops.secrets.authelia_session_secret.path;
};
settings = {
      # might change this to info in the future; for now it's nice seeing debug messages if something goes wrong
log.level = "debug";
access_control = {
default_policy = "deny";
rules = [
{
domain = "*.${domain}";
policy = "one_factor"; # using totp requires me to set up smtp support :(
}
];
};
theme = "auto";
default_2fa_method = "totp";
## use ldap backend, not yaml file
## https://www.authelia.com/configuration/first-factor/ldap/
# default_redirection_url = "https://auth.${domain}/";
notifier.filesystem.filename = "/tmp/authelia-notifier.txt"; ## TODO change this to something reasonable
authentication_backend = {
password_reset.disable = true;
file.path = pkgs.writers.writeYAML "users.yaml" {
users.xun = {
disabled = false;
displayname = "xun";
password = "$argon2id$v=19$m=65536,t=3,p=4$cwYrForToKZn7+urMrSXuQ$PStkqPlo/7/GZ+hMsJXfOyZ0WijNtuZpaHWyZUuBWBY";
email = "xunuwu@gmail.com";
groups = ["admin"];
};
};
};
storage.postgres = {
address = "unix:///run/postgresql";
database = "authelia-main";
        # this isn't actually used; ensureDBOwnership lets us authenticate to postgres as the matching unix user
username = "authelia-main";
password = "unused";
};
session.cookies = [
{
domain = domain;
authelia_url = "https://auth.${domain}";
default_redirection_url = "https://invalid.${domain}"; # TODO replace with overview thing mayb
}
];
## TODO: https://www.authelia.com/integration/proxies/forwarded-headers/#cloudflare
server = {
address = "127.0.0.1:${toString autheliaPort}";
endpoints.authz.forward-auth.implementation = "ForwardAuth";
};
};
};
services.postgresql = let
databases = ["authelia-main"];
in {
enable = true;
ensureDatabases = databases;
ensureUsers = lib.singleton {
name = "authelia-main";
ensureDBOwnership = true;
};
};
systemd.services.jellyfin.vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
services.jellyfin = {
enable = true;
};
services.prometheus = {
enable = true;
port = 9001;
extraFlags = ["--storage.tsdb.retention.time=30d"];
scrapeConfigs = [
{
job_name = config.networking.hostName;
static_configs = [
{
targets = ["127.0.0.1:${toString config.services.prometheus.exporters.node.port}"];
}
];
}
];
};
services.prometheus.exporters = {
node = {
enable = true;
enabledCollectors = ["systemd"];
};
};
# services.grafana = {
# enable = true;
# domain = "grafana.hopper";
# addr = "127.0.0.1";
# security = {
# adminUser = "admin";
# adminPasswordFile = config.sops.secrets.grafana-pass.path;
# };
# };
## TODO: add forgejo
  ## ignore this, it's cringe and I'll prob remove it later idk; it's also pasted from someone else, idk who tho ##
systemd.services.vpn-test-service = {
enable = true;
vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
script = "${pkgs.writeShellApplication {
name = "vpn-test";
runtimeInputs = with pkgs; [util-linux unixtools.ping coreutils curl bash libressl netcat-gnu openresolv dig];
text = ''
cd "$(mktemp -d)"
# DNS information
dig google.com
# Print resolv.conf
echo "/etc/resolv.conf contains:"
cat /etc/resolv.conf
# Query resolvconf
# echo "resolvconf output:"
# resolvconf -l
# echo ""
# Get ip
echo "Getting IP:"
curl -s ipinfo.io
echo -ne "DNS leak test:"
curl -s https://raw.githubusercontent.com/macvk/dnsleaktest/b03ab54d574adbe322ca48cbcb0523be720ad38d/dnsleaktest.sh -o dnsleaktest.sh
chmod +x dnsleaktest.sh
./dnsleaktest.sh
'';
}}/bin/vpn-test";
};
}

@@ -1,490 +0,0 @@
## TODO look into sops-nix placeholders
## reference: https://github.com/javigomezo/nixos/blob/b3ebe8d570ea9b37aea8bb3a343f6e16e054e322/services/network/authelia/user_database.nix
{
pkgs,
inputs,
config,
lib,
...
}: let
l = lib // builtins;
domain = "xunuwu.xyz";
caddyPort = 8336;
slskdUiPort = 23488;
caddyLocal = 8562;
ncPort = 46523;
# kanidmPort = 8300;
in {
## TODO use impermanence
## TODO setup fail2ban mayb
users.groups.media = {};
users.users.media = {
isSystemUser = true;
group = "media";
};
security.acme = {
acceptTerms = true;
defaults.email = "xunuwu@gmail.com";
certs = {
${domain} = {
domain = "*.${domain}";
dnsProvider = "cloudflare";
reloadServices = ["caddy.service"];
credentialFiles.CF_DNS_API_TOKEN_FILE = config.sops.secrets.cloudflare.path;
extraDomainNames = [domain];
};
};
};
vpnNamespaces."wg" = {
enable = true;
wireguardConfigFile = config.sops.secrets.wireguard.path;
accessibleFrom = [
"192.168.0.0/24"
];
    # Forwarded through the VPN provider, so these ports are reachable from outside
openVPNPorts = [
{
port = caddyPort;
protocol = "tcp";
}
{
port = config.services.slskd.settings.soulseek.listen_port;
protocol = "both"; # TODO figure out which one its actually using lol
}
{
port = config.services.transmission.settings.peer-port;
protocol = "both"; # TODO figure out which one its actually using lol
}
];
    # Mapped from inside the VPN namespace to the host, so services inside are reachable from the LAN
portMappings = let
passthrough = [
caddyPort
slskdUiPort
1900 # jellyfin discovery
7359 # jellyfin discovery
config.services.transmission.settings.rpc-port
80 # homepage
];
in
(l.map (x: {
from = x;
to = x;
})
passthrough)
++ [
];
};
networking.firewall = {
allowedUDPPorts = [1900 7359]; # Jellyfin auto-discovery
allowedTCPPorts = [
# caddy lan ports
80
443
2345
];
};
systemd.services.caddy.vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
services.caddy = {
enable = true;
virtualHosts = builtins.mapAttrs (n: v:
{
useACMEHost = domain;
hostName = "${n}.${domain}:${toString caddyPort}";
}
// v) {
jellyfin.extraConfig = "reverse_proxy localhost:8096"; # TODO setup proper auth
# kanidm.extraConfig = "reverse_proxy localhost:${toString kanidmPort}";
slskd = {
useACMEHost = null;
hostName = ":${toString slskdUiPort}";
extraConfig = ''
reverse_proxy localhost:${toString config.services.slskd.settings.web.port}
'';
};
dash = {
useACMEHost = null;
hostName = ":80";
extraConfig = "reverse_proxy localhost:${toString config.services.homepage-dashboard.listenPort}";
};
# nextcloud.extraConfig = "reverse_proxy localhost:${toString ncPort}";
other = {
hostName = ":${toString caddyPort}";
extraConfig = ''
respond 404 {
body "uhh that doesnt exist, i hope this isnt my fault.."
}
'';
};
};
};
# needed for deploying secrets
users.users.lldap = {
group = "lldap";
isSystemUser = true;
};
users.groups.lldap = {};
services.lldap = {
enable = true;
environment = {
LLDAP_JWT_SECRET_FILE = config.sops.secrets."lldap/jwt".path;
LLDAP_LDAP_USER_PASS_FILE = config.sops.secrets."lldap/password".path;
};
settings = {
ldap_base_dn = "dc=xunuwu,dc=xyz";
};
};
# services.nextcloud = {
# enable = true;
# appstoreEnable = true;
# autoUpdateApps.enable = true;
# https = true;
# hostName = "localhost";
# package = pkgs.nextcloud30;
# database.createLocally = true;
# configureRedis = true;
# extraAppsEnable = true;
# extraApps = {
# inherit (config.services.nextcloud.package.packages.apps) calendar;
# };
#
# config = {
# adminuser = "admin";
# adminpassFile = config.sops.secrets."nextcloud/admin_pass".path;
# dbtype = "pgsql";
# # commented so we just use the default sqlite
# # dbhost = "/run/postgresql";
# # dbtype = "pgsql";
# };
# settings = {
# default_phone_region = "SE";
# trusted_domains = ["127.0.0.1" "nextcloud.${domain}"];
# };
# };
# systemd.services.nginx.vpnConfinement = {
# enable = true;
# vpnNamespace = "wg";
# };
#
# services.nginx.virtualHosts."${config.services.nextcloud.hostName}".listen = [
# {
# addr = "127.0.0.1";
# port = ncPort; # NOT an exposed port
# }
# ];
# systemd.services.phpfpm-nextcloud.vpnConfinement = {
# enable = true;
# vpnNamespace = "wg";
# };
#
# systemd.services.nextcloud-setup = {
# requires = ["postgresql.service"];
# after = ["postgresql.service"];
# };
systemd.services.homepage-dashboard.vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
services.homepage-dashboard = {
enable = true;
widgets = [
{
resources = {
cpu = true;
disk = "/";
memory = true;
};
}
];
services = [
{
"Obtaining" = [
{
"transmission" = {
href = "http://hopper:9091";
icon = "transmission";
};
}
{
"slskd" = {
href = "http://hopper:23488";
icon = "slskd";
};
}
];
}
{
"Services" = [
{
"jellyfin" = {
href = "https://jellyfin.xunuwu.xyz";
icon = "jellyfin";
};
}
# {
# "nextcloud" = {
# href = "https://nextcloud.xunuwu.xyz";
# icon = "nextcloud";
# };
# }
];
}
];
};
systemd.services.jellyfin.vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
services.jellyfin = {
enable = true;
};
services.prometheus = {
enable = true;
port = 9001;
extraFlags = ["--storage.tsdb.retention.time=30d"];
scrapeConfigs = [
{
job_name = config.networking.hostName;
static_configs = [
{
targets = [
"127.0.0.1:${toString config.services.prometheus.exporters.node.port}"
"127.0.0.1:${toString config.services.prometheus.exporters.systemd.port}"
# "127.0.0.1:${toString config.services.prometheus.exporters.wireguard.port}"
];
}
];
}
];
};
services.prometheus.exporters = {
node = {
enable = true;
enabledCollectors = ["systemd"];
};
systemd.enable = true;
# wireguard = {
# enable = true;
# wireguardConfig = config.sops.secrets.wireguard.path;
# };
# nextcloud = {
# enable = true;
# tokenFile = config.sops.secrets."prometheus/nextcloud".path;
# url = "https://nextcloud.${domain}";
# };
};
systemd.services.slskd.vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
services.slskd = {
enable = true;
environmentFile = config.sops.secrets.slskd.path;
    domain = null; # why isn't this the default?
settings = {
remote_file_management = true;
shares.directories = ["/media/library/music"];
soulseek = {
listen_port = 14794;
description = "";
};
global = {
upload = {
slots = 50;
speed_limit = 10000;
};
download.speed_limit = 10000;
};
};
};
systemd.services.transmission.vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
services.transmission = {
enable = true;
performanceNetParameters = true;
settings = let
mbit = 125;
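      # transmission's speed limits are in kB/s; 125 kB/s is 1 Mbit/s, so these caps are 100 and 150 Mbit/s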
in {
speed-limit-up-enabled = true;
speed-limit-up = 100 * mbit;
speed-limit-down-enabled = true;
speed-limit-down = 150 * mbit;
rpc-authentication-required = true;
peer-port = 11936;
rpc-bind-address = "0.0.0.0";
      rpc-whitelist = "127.0.0.1,192.168.*.*";
};
credentialsFile = config.sops.secrets.transmission.path;
};
# only used for samba
users.groups.xun = {};
users.users.xun = {
isSystemUser = true;
group = "xun";
extraGroups = ["transmission" "vault" "media"];
};
users.groups.vault = {};
systemd.tmpfiles.rules = [
"d /srv/vault 0770 root vault -"
];
services.samba = {
enable = true;
openFirewall = true;
settings = {
global = {
"log level" = 6;
"log file" = "/var/log/samba/samba.log";
"server string" = config.networking.hostName;
"hosts allow" = "192.168.50.0/24";
"map to guest" = "bad user";
};
transmission = {
path = "/var/lib/transmission";
browseable = "yes";
"read only" = "yes";
"guest ok" = "no";
"create mask" = "0664";
"directory mask" = "0775";
};
vault = {
path = "/srv/vault";
browseable = "yes";
"read only" = "no";
"guest ok" = "no";
"create mask" = "0660";
"directory mask" = "0770";
"force user" = "xun";
"force group" = "xun";
};
slskd = {
path = "/var/lib/slskd";
browseable = "yes";
"read only" = "no";
"guest ok" = "no";
"create mask" = "0660";
"directory mask" = "0770";
"force user" = "slskd";
"force group" = "slskd";
};
library = {
path = "media/library";
browseable = "yes";
"read only" = "no";
"guest ok" = "no";
"create mask" = "0666";
"directory mask" = "0777";
"force user" = "media";
"force group" = "media";
};
};
};
# TODO use this for sso with some things maybe
# services.tailscaleAuth = {
# enable = true;
# user = config.services.caddy.user;
# group = config.services.caddy.group;
# };
# systemd.services.kanidm = {
# vpnConfinement = {
# enable = true;
# vpnNamespace = "wg";
# };
# serviceConfig = {
# RestartSec = "60";
# SupplementaryGroups = [config.security.acme.certs.${domain}.group];
# PrivateNetwork = l.mkOverride 40 false;
# ProtectControlGroups = l.mkForce false;
# RestrictNamespaces = l.mkForce false;
# LockPersonality = l.mkForce false;
# CapabilityBoundingSet = l.mkForce [];
# # TemporaryFileSystem = l.mkForce [];
# };
# };
#
# services.kanidm = {
# package = pkgs.kanidm.override {enableSecretProvisioning = true;};
#
# enableServer = true;
# serverSettings = let
# subdomain = "kanidm";
# kdomain = "${subdomain}.${domain}";
# certDir = config.security.acme.certs.${domain}.directory;
# in {
# domain = kdomain;
# origin = "https://${kdomain}";
# bindaddress = "0.0.0.0:${toString kanidmPort}";
# # ldapbindaddress = "[::1]:636";
# trust_x_forward_for = true;
# tls_chain = "${certDir}/fullchain.pem";
# tls_key = "${certDir}/key.pem";
# ## TODO online_backup mayb
# };
#
# provision = {
# enable = true;
#
# adminPasswordFile = config.sops.secrets."kanidm/admin_pass".path;
# idmAdminPasswordFile = config.sops.secrets."kanidm/idm_admin_pass".path;
#
# persons = let
# mainUser = "xun";
# mail = "xunuwu@gmail.com";
# in {
# ${mainUser} = {
# displayName = mainUser;
# legalName = mainUser;
# mailAddresses = [mail];
# groups = [
# "slskd.access"
# "slskd.admins"
# ];
# };
# };
#
# groups = {
# "slskd.access" = {};
# "slskd.admins" = {};
# };
#
# # systems.oath2 = {
# # slskd = {
# # displayName = "slskd";
# # originUrl = "https://";
# # };
# # };
# };
# };
## TODO: add forgejo
}

@@ -1,11 +0,0 @@
{
imports = [
./wsl.nix
./hardware.nix
./fonts.nix
];
networking.hostName = "kidney";
system.stateVersion = "24.05";
}

@@ -1,18 +0,0 @@
{
pkgs,
self,
...
}: {
fonts = {
packages = with pkgs; [
font-awesome
iosevka
emacs-all-the-icons-fonts
self.packages.${pkgs.system}.cartograph-cf
];
enableDefaultPackages = false;
fontconfig.defaultFonts = {
monospace = ["Iosevka"];
};
};
}

@@ -1,3 +0,0 @@
{
nixpkgs.hostPlatform.system = "x86_64-linux";
}

@@ -1,11 +0,0 @@
{inputs, ...}: {
imports = [
inputs.nixos-wsl.nixosModules.default
];
wsl = {
enable = true;
defaultUser = "xun";
startMenuLaunchers = true;
};
}

@@ -1,18 +0,0 @@
{pkgs, ...}: {
imports = [
./tools.nix
./sway.nix
];
environment.systemPackages = with pkgs; [
firefox
];
isoImage.edition = "sway-custom";
networking.hostName = "liveiso";
nixpkgs.hostPlatform = "x86_64-linux";
system.stateVersion = "23.11";
}

@@ -1,17 +0,0 @@
{modulesPath, ...}: {
imports = [
"${modulesPath}/installer/cd-dvd/installation-cd-graphical-base.nix"
];
programs.sway = {
enable = true;
};
services.displayManager = {
sddm.enable = true;
autoLogin = {
enable = true;
user = "nixos";
};
};
}

@@ -1,7 +0,0 @@
{pkgs, ...}: {
environment.systemPackages = with pkgs; [
neovim
parted
gparted
];
}

@@ -1,36 +0,0 @@
{lib, ...}: {
imports = [
./hardware.nix
./hibernate-boot.nix
./testing.nix
./samba-mount.nix
];
networking.hostName = "nixdesk";
#swapDevices = lib.singleton {
# device = "/dev/disk/by-uuid/1dcce4ab-71da-4928-83d5-62b20fd0fddf";
#};
#boot.resumeDevice = "/dev/disk/by-uuid/1dcce4ab-71da-4928-83d5-62b20fd0fddf";
#boot.kernelParams = [
# "resume=UUID=1dcce4ab-71da-4928-83d5-62b20fd0fddf"
# "resume_offset=3841492992" # fdisk -l
#];
nixpkgs.config = {
rocmSupport = true;
allowUnfreePredicate = pkg:
builtins.elem (lib.getName pkg) [
"discord"
"steam"
"steam-unwrapped"
"rider"
];
};
networking.interfaces.eno1.wakeOnLan.enable = true;
system.stateVersion = "23.11";
}

@@ -1,95 +0,0 @@
{
inputs,
config,
pkgs,
lib,
...
}: {
imports = [
inputs.hardware.nixosModules.common-cpu-amd
inputs.hardware.nixosModules.common-gpu-amd
inputs.hardware.nixosModules.common-pc-ssd
inputs.hardware.nixosModules.gigabyte-b550
];
boot = {
kernelPackages = pkgs.linuxPackages_latest;
initrd = {
availableKernelModules = [
"nvme"
"xhci_pci"
"ahci"
"usb_storage"
"usbhid"
"sd_mod"
];
kernelModules = ["amdgpu"];
};
kernelModules = ["kvm-amd"];
extraModulePackages = with config.boot.kernelPackages; [
rtl88xxau-aircrack # usb wifi card
];
loader = {
timeout = 10;
systemd-boot = {
enable = true;
consoleMode = "max";
configurationLimit = 120;
editor = false;
};
efi = {
canTouchEfiVariables = true;
efiSysMountPoint = "/boot";
};
};
};
fileSystems = {
"/" = {
device = "/dev/disk/by-uuid/d87276c0-ef9c-422e-b2de-effc1b47c654";
fsType = "btrfs";
options = ["subvol=root" "compress=zstd"];
};
"/home" = {
device = "/dev/disk/by-uuid/d87276c0-ef9c-422e-b2de-effc1b47c654";
fsType = "btrfs";
options = ["subvol=home" "compress=zstd"];
};
"/nix" = {
device = "/dev/disk/by-uuid/d87276c0-ef9c-422e-b2de-effc1b47c654";
fsType = "btrfs";
options = ["subvol=nix" "compress=zstd" "noatime"];
};
"/.swapvol" = {
device = "/dev/disk/by-uuid/d87276c0-ef9c-422e-b2de-effc1b47c654";
fsType = "btrfs";
options = ["subvol=swap" "noatime"];
};
"/boot" = {
device = "/dev/disk/by-uuid/588B-CB97";
fsType = "vfat";
};
};
boot.resumeDevice = "/dev/disk/by-uuid/d87276c0-ef9c-422e-b2de-effc1b47c654";
# btrfs inspect-internal map-swapfile -r /.swapvol/swapfile
boot.kernelParams = ["resume_offset=76293376"];
swapDevices = lib.singleton {
device = "/.swapvol/swapfile";
};
hardware.enableRedistributableFirmware = true;
services.xserver.videoDrivers = [
"amdgpu"
#"nvidia"
];
#hardware.nvidia = {
# modesetting.enable = true;
# package = config.boot.kernelPackages.nvidiaPackages.stable;
#};
nixpkgs.hostPlatform.system = "x86_64-linux";
hardware.cpu.amd.updateMicrocode = true;
}

@@ -1,28 +0,0 @@
{pkgs, ...}: {
# hibernate and reboot to firmware
# this allows me to save linux state and boot into another os (such as windows)
# make sure not to mount any filesystems from the other os or you risk losing data
environment.systemPackages = [
(pkgs.writeShellScriptBin "hib-boot" ''
set -e
if [ ! -v 1 ]; then
echo "no argument provided"
echo "please provide the id for the os you want to boot"
echo "these are the valid id's:"
echo ""
${pkgs.efibootmgr}/bin/efibootmgr
exit
fi
if [ ! -w /sys/power/disk -o ! -w /sys/power/state ]; then
echo "you lack permission to write to /sys/power/{disk,state}, are you not running this script as root?"
exit
fi
${pkgs.efibootmgr}/bin/efibootmgr -n "$1" >/dev/null
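      # "reboot" hibernation mode writes the image and reboots (into the BootNext entry set above) instead of powering off; Linux resumes on its next normal boot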
echo reboot >/sys/power/disk
echo disk >/sys/power/state
'')
];
}

@@ -1,94 +0,0 @@
{config, ...}: {
security.acme = {
acceptTerms = true;
defaults = {
email = "xunuwu@gmail.com";
reloadServices = ["podman-caddy.service"];
};
certs = {
"xun.cam" = {
dnsProvider = "cloudflare";
credentialFiles = {
CF_DNS_API_TOKEN_FILE = config.sops.secrets.cloudflare.path;
};
extraDomainNames = ["jellyfin.desktop.xun.cam"];
};
};
};
virtualisation.podman = {
enable = true;
autoPrune.enable = true;
dockerSocket.enable = true;
};
systemd.tmpfiles.rules = [
"d /media/config/caddy/data 0750 root root -"
"d /media/config/caddy/config 0750 root root -"
"d /media/config/jellyfin/config 0750 root root -"
"d /media/config/jellyfin/cache 0750 root root -"
"d /media/library 0750 root root -"
];
virtualisation.oci-containers = {
backend = "podman";
containers = {
gluetun = {
image = "qmcgaw/gluetun:v3";
volumes = [
"${config.sops.secrets.wireguard.path}:/gluetun/wireguard/wg0.conf"
];
ports = [
## This bypasses the firewall
## use 127.0.0.1:XXXX:XXXX if you only want it to be accessible locally
"8096:8096" # jellyfin local network
"60926:60926" # jellyfin
];
environment = {
VPN_SERVICE_PROVIDER = "airvpn";
VPN_TYPE = "wireguard";
SERVER_COUNTRIES = "Netherlands";
FIREWALL_VPN_INPUT_PORTS = "60926";
};
extraOptions = [
"--cap-add=NET_ADMIN"
"--device=/dev/net/tun:/dev/net/tun"
];
};
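      # jellyfin and caddy below join gluetun's network namespace via --network=container:gluetun, so all of their traffic goes through the VPN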
jellyfin = {
image = "jellyfin/jellyfin";
volumes = [
"/media/config/jellyfin/config:/config"
"/media/config/jellyfin/cache:/cache"
"/media/library:/library"
];
dependsOn = ["gluetun"];
extraOptions = [
"--network=container:gluetun"
"--device=/dev/dri:/dev/dri"
];
};
caddy = {
image = "caddy";
volumes = [
"${builtins.toFile "Caddyfile" ''
https://jellyfin.desktop.xun.cam:60926 {
tls /etc/ssl/certs/xun.cam/cert.pem /etc/ssl/certs/xun.cam/key.pem
reverse_proxy localhost:8096
}
''}:/etc/caddy/Caddyfile"
"/var/lib/acme/xun.cam:/etc/ssl/certs/xun.cam"
"/media/config/caddy/data:/data"
"/media/config/caddy/config:/config"
];
dependsOn = ["gluetun"];
extraOptions = [
"--network=container:gluetun"
];
};
};
};
}

@@ -1,72 +0,0 @@
{
config,
pkgs,
...
}: {
environment.systemPackages = [pkgs.cifs-utils];
systemd.mounts = [
{
description = "smb hopper transmission download directory";
what = "//192.168.50.97/transmission"; # hopper local ip
where = "/server/transmission";
type = "cifs";
options = "uid=xun,gid=users,credentials=${config.sops.secrets.samba.path}";
}
{
description = "smb hopper vault";
what = "//192.168.50.97/vault"; # hopper local ip
where = "/server/vault";
type = "cifs";
options = "uid=xun,gid=users,credentials=${config.sops.secrets.samba.path}";
}
{
description = "smb hopper library";
what = "//192.168.50.97/library"; # hopper local ip
where = "/server/library";
type = "cifs";
options = "uid=xun,gid=users,credentials=${config.sops.secrets.samba.path},vers=3.0";
}
{
description = "smb hopper slskd files";
what = "//192.168.50.97/slskd"; # hopper local ip
where = "/server/slskd";
type = "cifs";
options = "uid=xun,gid=users,credentials=${config.sops.secrets.samba.path}";
}
];
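  # matching automount units mount each share on first access and unmount after 10 minutes idle, so boot doesn't block on hopper being reachable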
systemd.automounts = [
{
requires = ["network-online.target"];
where = "/server/transmission";
wantedBy = ["multi-user.target"];
automountConfig = {
TimeoutIdleSec = "10min";
};
}
{
requires = ["network-online.target"];
where = "/server/vault";
wantedBy = ["multi-user.target"];
automountConfig = {
TimeoutIdleSec = "10min";
};
}
{
requires = ["network-online.target"];
where = "/server/library";
wantedBy = ["multi-user.target"];
automountConfig = {
TimeoutIdleSec = "10min";
};
}
{
requires = ["network-online.target"];
where = "/server/slskd";
wantedBy = ["multi-user.target"];
automountConfig = {
TimeoutIdleSec = "10min";
};
}
];
}

@@ -1,17 +0,0 @@
{self, ...}: {
imports = [
self.nixosModules.xun
];
xun.gaming = let
enabled = {enable = true;};
in {
enable = true;
steam = enabled;
gamescope = enabled;
gamemode = enabled;
sunshine = {
enable = true;
openFirewall = true;
};
};
}