some reorganizing

xunuwu 2025-01-16 10:08:58 +01:00
parent df9e8ae8bf
commit 113e015135
Signed by: xun
SSH key fingerprint: SHA256:Uot/1WoAjWAeqLOHA5vYy4phhVydsH7jCPmBjaPZfgI
182 changed files with 443 additions and 449 deletions

@@ -0,0 +1,38 @@
{
inputs,
systemProfiles,
...
}: {
imports = with systemProfiles; [
inputs.hardware.nixosModules.common-cpu-intel
inputs.vpn-confinement.nixosModules.default
./hardware.nix
./lab
secrets.default
secrets.hopper
core.security
core.locale
core.tools
core.ssh
core.deploy
nix.default # TODO slim this down
network.tailscale
network.avahi
network.networkd
# services.syncthing # TODO make syncthing not rely on having "xun" user
];
networking.hostName = "hopper";
swapDevices = [];
networking.interfaces.eno1.wakeOnLan.enable = true;
system.stateVersion = "23.11";
}

@@ -0,0 +1,58 @@
{config, ...}: {
nixpkgs.hostPlatform.system = "x86_64-linux";
## nvidia gpu
#services.xserver.videoDrivers = ["nvidia"];
#hardware.nvidia = {
# modesetting.enable = true;
# package = config.boot.kernelPackages.nvidiaPackages.stable;
#};
boot = {
blacklistedKernelModules = [
"xhci_pci" # was causing issues (100% udevd cpu usage)
];
initrd = {
availableKernelModules = [
"ehci_pci"
"ahci"
"usb_storage"
"usbhid"
"sd_mod"
];
kernelModules = [];
};
kernelModules = ["kvm-intel" "wireguard"];
extraModulePackages = [];
loader = {
systemd-boot = {
enable = true;
configurationLimit = 10;
};
efi.canTouchEfiVariables = true;
};
};
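# every mount below is a subvolume of the same btrfs filesystem, selected via subvol=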
fileSystems = {
"/" = {
device = "/dev/disk/by-uuid/1297e638-f2ff-49a2-a362-314ac7eeaabc";
fsType = "btrfs";
options = ["subvol=root" "compress=zstd" "autodefrag" "noatime"];
};
"/home" = {
device = "/dev/disk/by-uuid/1297e638-f2ff-49a2-a362-314ac7eeaabc";
fsType = "btrfs";
options = ["subvol=home" "compress=zstd"];
};
"/nix" = {
device = "/dev/disk/by-uuid/1297e638-f2ff-49a2-a362-314ac7eeaabc";
fsType = "btrfs";
options = ["subvol=nix" "compress=zstd" "noatime"];
};
"/boot" = {
device = "/dev/disk/by-uuid/8D4C-2F05";
fsType = "vfat";
};
};
}

@@ -0,0 +1,572 @@
## TODO look into sops-nix placeholders
## reference: https://github.com/javigomezo/nixos/blob/b3ebe8d570ea9b37aea8bb3a343f6e16e054e322/services/network/authelia/user_database.nix
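## a minimal sketch of what that could look like (secret name and template
## are illustrative, nothing below is wired up):
# sops.templates."slskd.env".content = ''
#   SLSKD_SLSK_PASSWORD=${config.sops.placeholder."slskd/password"}
# '';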
{
pkgs,
inputs,
config,
lib,
...
}: let
l = lib // builtins;
domain = "xunuwu.xyz";
caddyPort = 8336;
slskdUiPort = 23488;
caddyLocal = 8562;
ncPort = 46523;
kanidmPort = 8300;
in {
imports = [
./samba.nix
];
## TODO use kanidm
## TODO use impermanence
## TODO maybe set up fail2ban
users.groups.media = {};
users.users.media = {
isSystemUser = true;
group = "media";
};
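## one wildcard cert for *.${domain} via dns-01 (cloudflare), reloaded into caddy;
## kanidm gets its own cert in the kanidm group so the service can read the key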
security.acme = {
acceptTerms = true;
defaults.email = "xunuwu@gmail.com";
certs = {
${domain} = {
domain = "*.${domain}";
dnsProvider = "cloudflare";
reloadServices = ["caddy.service"];
credentialFiles.CF_DNS_API_TOKEN_FILE = config.sops.secrets.cloudflare.path;
extraDomainNames = [domain];
};
"kanidm.${domain}" = {
domain = "kanidm.${domain}";
group = "kanidm";
dnsProvider = "cloudflare";
reloadServices = ["caddy.service" "kanidm.service"];
credentialFiles.CF_DNS_API_TOKEN_FILE = config.sops.secrets.cloudflare.path;
};
};
};
## make sure the vpn connection is reasonably fast
## TODO find a less hacky way to do this than the attempt commented out below
# systemd.services."wg-speedcheck" = {
# serviceConfig = {
# Type = "oneshot";
# ExecCondition = "${config.systemd.package}/bin/systemctl is-active wg.service"; # horrible, horrible hack, theres 100% a better way
# ExecStart = pkgs.writers.writeBash "wg-speedcheck.sh" ''
# echo "running test in netns"
# vpn_result=$( ${pkgs.iproute2}/bin/ip netns exec wg ${pkgs.speedtest-cli}/bin/speedtest --json )
# vpn_download=$( echo "$vpn_result" | ${l.getExe pkgs.jq} '.download' )
# vpn_upload=$( echo "$vpn_result" | ${l.getExe pkgs.jq} '.upload' )
#
# echo "running test outside of netns"
# normal_result=$( ${pkgs.speedtest-cli}/bin/speedtest --json )
# normal_download=$( echo "$normal_result" | ${l.getExe pkgs.jq} '.download' )
# normal_upload=$( echo "$normal_result" | ${l.getExe pkgs.jq} '.upload' )
#
# download_ratio_is_more_than_half=$( echo "$vpn_download / $normal_download > 0.5" | ${l.getExe pkgs.bc} -l | tr -d '\n' )
# upload_ratio_is_more_than_half=$( echo "$vpn_upload / $normal_upload > 0.5" | ${l.getExe pkgs.bc} -l | tr -d '\n' )
#
# if [[ "$upload_ratio_is_more_than_half" == "0" || "$download_ratio_is_more_than_half" == "0" ]]; then
# echo "ratio is insufficient, restarting vpn"
# systemctl restart wg.service
# exit
# fi
# echo "ratio is sufficient"
# '';
# };
# };
# systemd.timers."wg-speedcheck" = {
# wantedBy = ["timers.target"];
# timerConfig = {
# OnCalendar = "0/2:00:00";
# Unit = "wg-speedcheck.service";
# };
# };
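## vpn-confinement: every service below with vpnConfinement.vpnNamespace = "wg"
## runs in a network namespace whose only route out is the wireguard tunnel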
vpnNamespaces."wg" = {
enable = true;
wireguardConfigFile = config.sops.secrets.wireguard.path;
accessibleFrom = [
"192.168.0.0/24"
];
# Forwarded through my vpn, making things accessible from outside
openVPNPorts = [
{
port = caddyPort;
protocol = "tcp";
}
{
port = config.services.slskd.settings.soulseek.listen_port;
protocol = "both";
}
{
port = config.services.transmission.settings.peer-port;
protocol = "both";
}
];
# Mapped from inside the vpn namespace to outside of it, making things inside accessible to the LAN
portMappings = let
passthrough = [
caddyPort
slskdUiPort
1900 # jellyfin discovery
7359 # jellyfin discovery
config.services.transmission.settings.rpc-port
80 # homepage
];
in (l.map (x: {
from = x;
to = x;
})
passthrough);
};
networking.firewall = {
allowedUDPPorts = [1900 7359]; # Jellyfin auto-discovery
allowedTCPPorts = [
# caddy lan ports
80
443
2345
];
};
systemd.services.caddy.vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
services.caddy = {
enable = true;
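# shared defaults (wildcard cert + vpn-side port) are merged into each vhost
# below; the `// v` lets individual hosts override them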
virtualHosts = builtins.mapAttrs (n: v:
{
useACMEHost = domain;
hostName = "${n}.${domain}:${toString caddyPort}";
}
// v) {
jellyfin.extraConfig = "reverse_proxy localhost:8096"; # TODO setup proper auth
kanidm = {
useACMEHost = null;
# hostName = "kanidm.xunuwu.xyz:${toString caddyPort}";
extraConfig = ''
reverse_proxy https://127.0.0.1:${toString kanidmPort} {
header_up Host {upstream_hostport}
header_down Access-Control-Allow-Origin "*"
transport http {
tls_server_name ${config.services.kanidm.serverSettings.domain}
}
}
'';
};
slskd = {
useACMEHost = null;
hostName = ":${toString slskdUiPort}";
extraConfig = ''
reverse_proxy localhost:${toString config.services.slskd.settings.web.port}
'';
};
dash = {
useACMEHost = null;
hostName = ":80";
extraConfig = "reverse_proxy localhost:${toString config.services.homepage-dashboard.listenPort}";
};
# nextcloud.extraConfig = "reverse_proxy localhost:${toString ncPort}";
other = {
hostName = ":${toString caddyPort}";
extraConfig = ''
respond 404 {
body "uhh that doesnt exist, i hope this isnt my fault.."
}
'';
};
};
};
# systemd.services.authentik.vpnConfinement = {
# enable = true;
# vpnNamespace = "wg";
# };
# services = {
# authentik = {
# enable = true;
# environmentFile = config.sops.secrets.authentik.path;
# settings = {
# disable_startup_analytics = true;
# avatars = "initials";
# };
# };
# authentik-ldap = {
# enable = true;
# };
# };
# services.keycloak = {
# enable = true;
# settings = {
# hostname = "keycloak.${domain}";
# };
# database.passwordFile = config.sops.secrets."keycloak/db".path;
# };
# needed for deploying secrets
users.users.lldap = {
group = "lldap";
isSystemUser = true;
};
users.groups.lldap = {};
services.lldap = {
enable = true;
environment = {
LLDAP_JWT_SECRET_FILE = config.sops.secrets."lldap/jwt".path;
LLDAP_LDAP_USER_PASS_FILE = config.sops.secrets."lldap/password".path;
};
settings = {
ldap_base_dn = "dc=xunuwu,dc=xyz";
};
};
# services.nextcloud = {
# enable = true;
# appstoreEnable = true;
# autoUpdateApps.enable = true;
# https = true;
# hostName = "localhost";
# package = pkgs.nextcloud30;
# database.createLocally = true;
# configureRedis = true;
# extraAppsEnable = true;
# extraApps = {
# inherit (config.services.nextcloud.package.packages.apps) calendar;
# };
#
# config = {
# adminuser = "admin";
# adminpassFile = config.sops.secrets."nextcloud/admin_pass".path;
# dbtype = "pgsql";
# # commented so we just use the default sqlite
# # dbhost = "/run/postgresql";
# # dbtype = "pgsql";
# };
# settings = {
# default_phone_region = "SE";
# trusted_domains = ["127.0.0.1" "nextcloud.${domain}"];
# };
# };
# systemd.services.nginx.vpnConfinement = {
# enable = true;
# vpnNamespace = "wg";
# };
#
# services.nginx.virtualHosts."${config.services.nextcloud.hostName}".listen = [
# {
# addr = "127.0.0.1";
# port = ncPort; # NOT an exposed port
# }
# ];
# systemd.services.phpfpm-nextcloud.vpnConfinement = {
# enable = true;
# vpnNamespace = "wg";
# };
#
# systemd.services.nextcloud-setup = {
# requires = ["postgresql.service"];
# after = ["postgresql.service"];
# };
systemd.services.homepage-dashboard.vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
services.homepage-dashboard = {
enable = true;
widgets = [
{
resources = {
cpu = true;
disk = "/";
memory = true;
};
}
];
services = [
{
"Obtaining" = [
{
"transmission" = {
href = "http://${config.networking.hostName}:9091";
icon = "transmission";
};
}
{
"slskd" = {
href = "http://${config.networking.hostName}:23488";
icon = "slskd";
};
}
];
}
{
"Services" = [
{
"jellyfin" = {
href = "https://jellyfin.xunuwu.xyz";
icon = "jellyfin";
};
}
{
"lldap" = {
href = "http://${config.networking.hostName}:${toString config.services.lldap.settings.http_port}";
icon = "lldap";
};
}
# {
# "nextcloud" = {
# href = "https://nextcloud.xunuwu.xyz";
# icon = "nextcloud";
# };
# }
];
}
];
};
systemd.services.jellyfin.vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
services.jellyfin = {
enable = true;
};
services.prometheus = {
enable = true;
port = 9001;
extraFlags = ["--storage.tsdb.retention.time=30d"];
scrapeConfigs = [
{
job_name = "node";
static_configs = lib.singleton {
targets = ["127.0.0.1:${toString config.services.prometheus.exporters.node.port}"];
};
}
{
job_name = "systemd";
static_configs = lib.singleton {
targets = ["127.0.0.1:${toString config.services.prometheus.exporters.systemd.port}"];
};
}
{
job_name = "tailscale_client";
static_configs = lib.singleton {
targets = ["100.100.100.100"];
};
}
# TODO figure out why i can't connect to slskd locally
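# possibly because slskd is confined to the wg namespace, so its web/metrics
# port isn't on the host's loopback; scraping it via the namespace's mapped
# address might work (untested guess)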
# {
# job_name = "slskd";
# static_configs = lib.singleton {
# targets = ["127.0.0.1:${toString slskdUiPort}"];
# };
# }
];
};
services.prometheus.exporters = {
node = {
enable = true;
enabledCollectors = ["systemd"];
};
systemd.enable = true;
# wireguard = {
# enable = true;
# wireguardConfig = config.sops.secrets.wireguard.path;
# };
# nextcloud = {
# enable = true;
# tokenFile = config.sops.secrets."prometheus/nextcloud".path;
# url = "https://nextcloud.${domain}";
# };
};
systemd.services.slskd.vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
services.slskd = {
enable = true;
environmentFile = config.sops.secrets.slskd.path;
domain = null; # why isn't this the default?
settings = {
metrics = {
enabled = true;
authentication.disabled = true;
};
remote_file_management = true;
shares.directories = ["/media/library/music"];
soulseek = {
listen_port = 14794;
description = "";
};
global = {
upload = {
slots = 50;
speed_limit = 10000;
};
download.speed_limit = 10000;
};
};
};
systemd.services.transmission.vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
services.transmission = {
enable = true;
package = pkgs.transmission_4;
performanceNetParameters = true;
settings = let
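# transmission's speed limits are in kB/s; 1 Mbit/s = 125 kB/s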
mbit = 125;
in {
speed-limit-up-enabled = true;
speed-limit-up = 100 * mbit;
speed-limit-down-enabled = true;
speed-limit-down = 150 * mbit;
rpc-authentication-required = true;
peer-port = 11936;
rpc-bind-address = "0.0.0.0";
rpc-whitelist = "127.0.0.1,192.168.*.*";
};
credentialsFile = config.sops.secrets.transmission.path;
};
# TODO use this for sso with some things maybe
# services.tailscaleAuth = {
# enable = true;
# user = config.services.caddy.user;
# group = config.services.caddy.group;
# };
systemd.services.kanidm = {
vpnConfinement = {
enable = true;
vpnNamespace = "wg";
};
serviceConfig = {
InaccessiblePaths = lib.mkForce [];
};
};
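# raise the inotify watch limit; the kernel default is easy to exhaust with
# file-watching services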
boot.kernel.sysctl."fs.inotify.max_user_watches" = 524288;
services.kanidm = {
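# enableSecretProvisioning is needed for the *PasswordFile options under provision below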
package = pkgs.kanidm_1_4.override {enableSecretProvisioning = true;};
enableServer = true;
serverSettings = {
domain = "kanidm.${domain}";
origin = "https://kanidm.${domain}";
bindaddress = "127.0.0.1:${toString kanidmPort}";
ldapbindaddress = "[::1]:3636";
trust_x_forward_for = true;
tls_chain = "${config.security.acme.certs."kanidm.${domain}".directory}/fullchain.pem";
tls_key = "${config.security.acme.certs."kanidm.${domain}".directory}/key.pem";
};
provision = {
enable = true;
adminPasswordFile = config.sops.secrets."kanidm/admin_pass".path;
idmAdminPasswordFile = config.sops.secrets."kanidm/idm_admin_pass".path;
persons = {
"xun" = {
displayName = "xun";
legalName = "xun";
mailAddresses = ["xunuwu@gmail.com"];
groups = [];
};
};
};
};
# systemd.services.kanidm = {
# vpnConfinement = {
# enable = true;
# vpnNamespace = "wg";
# };
# serviceConfig = {
# RestartSec = "60";
# SupplementaryGroups = [config.security.acme.certs.${domain}.group];
# PrivateNetwork = l.mkOverride 40 false;
# ProtectControlGroups = l.mkForce false;
# RestrictNamespaces = l.mkForce false;
# LockPersonality = l.mkForce false;
# CapabilityBoundingSet = l.mkForce [];
# # TemporaryFileSystem = l.mkForce [];
# };
# };
#
# services.kanidm = {
# package = pkgs.kanidm.override {enableSecretProvisioning = true;};
#
# enableServer = true;
# serverSettings = let
# subdomain = "kanidm";
# kdomain = "${subdomain}.${domain}";
# certDir = config.security.acme.certs.${domain}.directory;
# in {
# domain = kdomain;
# origin = "https://${kdomain}";
# bindaddress = "0.0.0.0:${toString kanidmPort}";
# # ldapbindaddress = "[::1]:636";
# trust_x_forward_for = true;
# tls_chain = "${certDir}/fullchain.pem";
# tls_key = "${certDir}/key.pem";
# ## TODO online_backup mayb
# };
#
# provision = {
# enable = true;
#
# adminPasswordFile = config.sops.secrets."kanidm/admin_pass".path;
# idmAdminPasswordFile = config.sops.secrets."kanidm/idm_admin_pass".path;
#
# persons = let
# mainUser = "xun";
# mail = "xunuwu@gmail.com";
# in {
# ${mainUser} = {
# displayName = mainUser;
# legalName = mainUser;
# mailAddresses = [mail];
# groups = [
# "slskd.access"
# "slskd.admins"
# ];
# };
# };
#
# groups = {
# "slskd.access" = {};
# "slskd.admins" = {};
# };
#
# # systems.oauth2 = {
# # slskd = {
# # displayName = "slskd";
# # originUrl = "https://";
# # };
# # };
# };
# };
## TODO: add forgejo
}

@@ -0,0 +1,65 @@
{config, ...}: {
# only used for samba
users.groups.xun = {};
users.users.xun = {
isSystemUser = true;
group = "xun";
extraGroups = ["transmission" "vault" "media"];
};
users.groups.vault = {};
systemd.tmpfiles.rules = [
"d /srv/vault 0770 root vault -"
];
services.samba = {
enable = true;
openFirewall = true;
settings = {
global = {
"log level" = 6;
"log file" = "/var/log/samba/samba.log";
"server string" = config.networking.hostName;
"hosts allow" = "192.168.50.0/24";
"map to guest" = "bad user";
};
transmission = {
path = "/var/lib/transmission";
browseable = "yes";
"read only" = "yes";
"guest ok" = "no";
"create mask" = "0664";
"directory mask" = "0775";
};
vault = {
path = "/srv/vault";
browseable = "yes";
"read only" = "no";
"guest ok" = "no";
"create mask" = "0660";
"directory mask" = "0770";
"force user" = "xun";
"force group" = "xun";
};
slskd = {
path = "/var/lib/slskd";
browseable = "yes";
"read only" = "no";
"guest ok" = "no";
"create mask" = "0660";
"directory mask" = "0770";
"force user" = "slskd";
"force group" = "slskd";
};
library = {
path = "media/library";
browseable = "yes";
"read only" = "no";
"guest ok" = "no";
"create mask" = "0666";
"directory mask" = "0777";
"force user" = "media";
"force group" = "media";
};
};
};
}

@@ -0,0 +1,41 @@
{
specialArgs,
systemProfiles,
homeSuites,
...
}: {
imports = with systemProfiles; [
./wsl.nix
./hardware.nix
./fonts.nix
core.tools
core.users
core.locale
programs.tools
programs.zsh
programs.home-manager
hardware.graphics
services.flatpak
services.xdg-portals
nix.default
nix.gc
{
home-manager = {
users.xun.imports = [
homeSuites.kidney
{home.stateVersion = "24.05";}
];
extraSpecialArgs = specialArgs;
};
}
];
networking.hostName = "kidney";
system.stateVersion = "24.05";
}

@@ -0,0 +1,18 @@
{
pkgs,
self,
...
}: {
fonts = {
packages = with pkgs; [
font-awesome
iosevka
emacs-all-the-icons-fonts
self.packages.${pkgs.system}.cartograph-cf
];
enableDefaultPackages = false;
fontconfig.defaultFonts = {
monospace = ["Iosevka"];
};
};
}

@@ -0,0 +1,3 @@
{
nixpkgs.hostPlatform.system = "x86_64-linux";
}

@@ -0,0 +1,11 @@
{inputs, ...}: {
imports = [
inputs.nixos-wsl.nixosModules.default
];
wsl = {
enable = true;
defaultUser = "xun";
startMenuLaunchers = true;
};
}

@@ -0,0 +1,107 @@
{
lib,
inputs,
systemProfiles,
specialArgs,
self,
homeSuites,
...
}: {
imports = with systemProfiles; [
./hardware.nix
./hibernate-boot.nix
./samba-mount.nix
inputs.stylix.nixosModules.stylix
secrets.default
secrets.nixdesk
core.security
core.users
core.ssh
core.locale
nix.default
programs.zsh
core.tools
core.compat
core.boot
core.docs
core.gvfs
nix.gc
hardware.graphics
hardware.steam-hardware
hardware.bluetooth
hardware.qmk
network.networkd
network.avahi
network.localsend
network.tailscale
network.goldberg
desktop.sway
programs.dconf
programs.fonts
programs.home-manager
# programs.qt
programs.adb
programs.kanidm
programs.openrgb
programs.tools
programs.thunar
programs.corectrl
services.default
services.pipewire
services.flatpak
services.syncthing
services.waydroid
services.virt-manager
services.sunshine
# network.wifi
themes.dark
programs.gamemode
programs.gamescope
programs.steam
programs.RE
{
home-manager = {
backupFileExtension = "hm-backup";
users.xun.imports = [
homeSuites.nixdesk
inputs.sops-nix.homeManagerModules.sops
{home.stateVersion = "23.11";}
];
extraSpecialArgs = specialArgs;
};
}
];
networking.hostName = "nixdesk";
nixpkgs.config = {
rocmSupport = true;
allowUnfreePredicate = pkg:
builtins.elem (lib.getName pkg) [
"steam"
"steam-unwrapped"
"discord"
"obsidian"
"rider"
"android-studio-stable"
];
android_sdk.accept_license = true;
};
networking.interfaces.eno1.wakeOnLan.enable = true;
system.stateVersion = "23.11";
}

@@ -0,0 +1,88 @@
{
inputs,
config,
pkgs,
lib,
...
}: {
imports = [
inputs.hardware.nixosModules.common-cpu-amd
inputs.hardware.nixosModules.common-gpu-amd
inputs.hardware.nixosModules.common-pc-ssd
inputs.hardware.nixosModules.gigabyte-b550
];
boot = {
kernelPackages = pkgs.linuxPackages_latest;
initrd = {
availableKernelModules = [
"nvme"
"xhci_pci"
"ahci"
"usb_storage"
"usbhid"
"sd_mod"
];
kernelModules = ["amdgpu"];
};
kernelModules = ["kvm-amd"];
extraModulePackages = with config.boot.kernelPackages; [
rtl88xxau-aircrack # usb wifi card
];
loader = {
timeout = 10;
systemd-boot = {
enable = true;
consoleMode = "max";
configurationLimit = 120;
editor = false;
};
efi = {
canTouchEfiVariables = true;
efiSysMountPoint = "/boot";
};
};
};
fileSystems = {
"/" = {
device = "/dev/disk/by-uuid/d87276c0-ef9c-422e-b2de-effc1b47c654";
fsType = "btrfs";
options = ["subvol=root" "compress=zstd"];
};
"/home" = {
device = "/dev/disk/by-uuid/d87276c0-ef9c-422e-b2de-effc1b47c654";
fsType = "btrfs";
options = ["subvol=home" "compress=zstd"];
};
"/nix" = {
device = "/dev/disk/by-uuid/d87276c0-ef9c-422e-b2de-effc1b47c654";
fsType = "btrfs";
options = ["subvol=nix" "compress=zstd" "noatime"];
};
"/.swapvol" = {
device = "/dev/disk/by-uuid/d87276c0-ef9c-422e-b2de-effc1b47c654";
fsType = "btrfs";
options = ["subvol=swap" "noatime"];
};
"/boot" = {
device = "/dev/disk/by-uuid/588B-CB97";
fsType = "vfat";
};
};
boot.resumeDevice = "/dev/disk/by-uuid/d87276c0-ef9c-422e-b2de-effc1b47c654";
# btrfs inspect-internal map-swapfile -r /.swapvol/swapfile
boot.kernelParams = ["resume_offset=76293376"];
swapDevices = lib.singleton {
device = "/.swapvol/swapfile";
};
hardware.enableRedistributableFirmware = true;
services.xserver.videoDrivers = ["amdgpu"];
nixpkgs.hostPlatform.system = "x86_64-linux";
hardware.cpu.amd.updateMicrocode = true;
}

@@ -0,0 +1,28 @@
{pkgs, ...}: {
# hibernate and reboot to firmware
# this allows me to save linux state and boot into another os (such as windows)
# make sure not to mount any filesystems from the other os or you risk losing data
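# usage: hib-boot <id>, e.g. `sudo hib-boot 3` (ids come from efibootmgr output)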
environment.systemPackages = [
(pkgs.writeShellScriptBin "hib-boot" ''
set -e
if [ ! -v 1 ]; then
echo "no argument provided"
echo "please provide the id for the os you want to boot"
echo "these are the valid id's:"
echo ""
${pkgs.efibootmgr}/bin/efibootmgr
exit
fi
if [ ! -w /sys/power/disk -o ! -w /sys/power/state ]; then
echo "you lack permission to write to /sys/power/{disk,state}, are you not running this script as root?"
exit
fi
${pkgs.efibootmgr}/bin/efibootmgr -n "$1" >/dev/null
echo reboot >/sys/power/disk
echo disk >/sys/power/state
'')
];
}

@@ -0,0 +1,72 @@
{
config,
pkgs,
...
}: {
environment.systemPackages = [pkgs.cifs-utils];
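# the four mount/automount pairs below differ only in the share name;
# a helper could generate them, e.g. (a sketch, names illustrative):
# systemd.mounts = map (share: {
#   description = "smb hopper ${share}";
#   what = "//192.168.50.97/${share}";
#   where = "/server/${share}";
#   type = "cifs";
#   options = "uid=xun,gid=users,credentials=${config.sops.secrets.samba.path}";
# }) ["transmission" "vault" "library" "slskd"];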
systemd.mounts = [
{
description = "smb hopper transmission download directory";
what = "//192.168.50.97/transmission"; # hopper local ip
where = "/server/transmission";
type = "cifs";
options = "uid=xun,gid=users,credentials=${config.sops.secrets.samba.path}";
}
{
description = "smb hopper vault";
what = "//192.168.50.97/vault"; # hopper local ip
where = "/server/vault";
type = "cifs";
options = "uid=xun,gid=users,credentials=${config.sops.secrets.samba.path}";
}
{
description = "smb hopper library";
what = "//192.168.50.97/library"; # hopper local ip
where = "/server/library";
type = "cifs";
options = "uid=xun,gid=users,credentials=${config.sops.secrets.samba.path},vers=3.0";
}
{
description = "smb hopper slskd files";
what = "//192.168.50.97/slskd"; # hopper local ip
where = "/server/slskd";
type = "cifs";
options = "uid=xun,gid=users,credentials=${config.sops.secrets.samba.path}";
}
];
systemd.automounts = [
{
requires = ["network-online.target"];
where = "/server/transmission";
wantedBy = ["multi-user.target"];
automountConfig = {
TimeoutIdleSec = "10min";
};
}
{
requires = ["network-online.target"];
where = "/server/vault";
wantedBy = ["multi-user.target"];
automountConfig = {
TimeoutIdleSec = "10min";
};
}
{
requires = ["network-online.target"];
where = "/server/library";
wantedBy = ["multi-user.target"];
automountConfig = {
TimeoutIdleSec = "10min";
};
}
{
requires = ["network-online.target"];
where = "/server/slskd";
wantedBy = ["multi-user.target"];
automountConfig = {
TimeoutIdleSec = "10min";
};
}
];
}