diff --git a/hosts/fw.cloonar.com/configuration.nix b/hosts/fw.cloonar.com/configuration.nix
index e8224fd..969b1fb 100644
--- a/hosts/fw.cloonar.com/configuration.nix
+++ b/hosts/fw.cloonar.com/configuration.nix
@@ -23,17 +23,18 @@
./modules/omada.nix
# git
- ./modules/gitea.nix
+ # ./modules/gitea.nix
# ./modules/drone/server.nix
# ./modules/drone/runner.nix
./modules/fwmetrics.nix
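+    # hypervisor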
+ ./modules/hypervisor
# home assistant
- ./modules/home-assistant.nix
- ./modules/mopidy.nix
- ./modules/mosquitto.nix
- ./modules/snapserver.nix
- ./modules/deconz
+ # ./modules/home-assistant.nix
+ # ./modules/mopidy.nix
+ # ./modules/mosquitto.nix
+ # ./modules/snapserver.nix
+ # ./modules/deconz
./hardware-configuration.nix
];
diff --git a/hosts/fw.cloonar.com/modules/hypervisor/baseline-qemu.nix b/hosts/fw.cloonar.com/modules/hypervisor/baseline-qemu.nix
new file mode 100644
index 0000000..223d9aa
--- /dev/null
+++ b/hosts/fw.cloonar.com/modules/hypervisor/baseline-qemu.nix
@@ -0,0 +1,12 @@
+{
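+ # Disk and boot settings for QEMU guests: root on the "nixos"-labelled disk,
+ # virtio drivers in the initrd, and GRUB installed to /dev/vda.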
+ imports = [ ./baseline.nix ];
+ fileSystems."/".device = "/dev/disk/by-label/nixos";
+ boot.initrd.availableKernelModules = [ "xhci_pci" "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" "virtio_balloon" "virtio_blk" "virtio_pci" "virtio_ring" ];
+ boot.loader = {
+ grub = {
+ version = 2;
+ device = "/dev/vda";
+ };
+ timeout = 0;
+ };
+}
diff --git a/hosts/fw.cloonar.com/modules/hypervisor/baseline.nix b/hosts/fw.cloonar.com/modules/hypervisor/baseline.nix
new file mode 100644
index 0000000..e3d2ffd
--- /dev/null
+++ b/hosts/fw.cloonar.com/modules/hypervisor/baseline.nix
@@ -0,0 +1,11 @@
+{
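+ # Minimal baseline shared by every guest: timezone, a placeholder hostname,
+ # and SSH access for root via the keys below.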
+ time.timeZone = "Europe/Vienna";
+ networking.hostName = "vm";
+ services.openssh.enable = true;
+ users.users.root.openssh.authorizedKeys.keys = [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDN/2SAFm50kraB1fepAizox/QRXxB7WbqVbH+5OPalDT47VIJGNKOKhixQoqhABHxEoLxdf/C83wxlCVlPV9poLfDgVkA3Lyt5r3tSFQ6QjjOJAgchWamMsxxyGBedhKvhiEzcr/Lxytnoz3kjDG8fqQJwEpdqMmJoMUfyL2Rqp16u+FQ7d5aJtwO8EUqovhMaNO7rggjPpV/uMOg+tBxxmscliN7DLuP4EMTA/FwXVzcFNbOx3K9BdpMRAaSJt4SWcJO2cS2KHA5n/H+PQI7nz5KN3Yr/upJN5fROhi/SHvK39QOx12Pv7FCuWlc+oR68vLaoCKYhnkl3DnCfc7A7"
+ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIRQuPqH5fdX3KEw7DXzWEdO3AlUn1oSmtJtHB71ICoH Generated By Termius"
+ ];
+
+ system.stateVersion = "22.05";
+}
diff --git a/hosts/fw.cloonar.com/modules/hypervisor/default.nix b/hosts/fw.cloonar.com/modules/hypervisor/default.nix
new file mode 100644
index 0000000..4113f4b
--- /dev/null
+++ b/hosts/fw.cloonar.com/modules/hypervisor/default.nix
@@ -0,0 +1,81 @@
+{ pkgs, lib, ... } @ args:
+let
+ hostName = "server";
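+ # One libvirt guest per attribute: memory and mac feed the generated domain XML,
+ # diskSize (GiB) sizes the LVM-backed volume that holds the guest's disk.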
+ guests = {
+ git = {
+ memory = 16;
+ mac = "ed:22:4a:96:c3:01";
+ diskSize = 128;
+ };
+ };
+in
+{
+ boot.kernelModules = [ "kvm-amd" "kvm-intel" ];
+ virtualisation.libvirtd.enable = true;
+
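+ # One "libvirtd-guest-<name>" service per guest: on start it creates the backing
+ # volume if needed, (re)defines the domain from the generated XML and boots it;
+ # on stop it requests a shutdown and destroys the domain after a 10 s grace period.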
+ systemd.services = lib.mapAttrs' (name: guest: lib.nameValuePair "libvirtd-guest-${name}" {
+ after = [ "libvirtd.service" ];
+ requires = [ "libvirtd.service" ];
+ wantedBy = [ "multi-user.target" ];
+ serviceConfig = {
+ Type = "oneshot";
+ RemainAfterExit = "yes";
+ };
+ script =
+ let
+ xml = pkgs.writeText "libvirt-guest-${name}.xml"
+ ''
+   <domain type="kvm">
+     <name>${name}</name>
+     <uuid>UUID</uuid>
+     <os>
+       <type>hvm</type>
+     </os>
+     <memory unit="GiB">${toString guest.memory}</memory>
+     <devices>
+       <disk type="block" device="disk">
+         <driver name="qemu" type="raw"/>
+         <source dev="/dev/${hostName}/guest-${name}"/>
+         <target dev="vda" bus="virtio"/>
+       </disk>
+       <interface type="bridge">
+         <mac address="${guest.mac}"/>
+         <!-- bridge name is an assumption; point it at the host's actual bridge -->
+         <source bridge="br0"/>
+         <model type="virtio"/>
+       </interface>
+       <console type="pty"/>
+     </devices>
+     <features>
+       <acpi/>
+     </features>
+   </domain>
+ '';
+ in
+ ''
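+ # On first start, create the LVM-backed volume and seed it from the baseline image.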
+ if ! ${pkgs.libvirt}/bin/virsh vol-key 'guest-${name}' --pool guests &> /dev/null; then
+ ${pkgs.libvirt}/bin/virsh vol-create-as guests 'guest-${name}' '${toString guest.diskSize}GiB'
+ ${pkgs.qemu}/bin/qemu-img convert /etc/virt/base-images/baseline.qcow2 '/dev/${hostName}/guest-${name}'
+ fi
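+ # Reuse the UUID of an already-defined domain so "virsh define" updates it in place.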
+ uuid="$(${pkgs.libvirt}/bin/virsh domuuid '${name}' || true)"
+ ${pkgs.libvirt}/bin/virsh define <(sed "s/UUID/$uuid/" '${xml}')
+ ${pkgs.libvirt}/bin/virsh start '${name}'
+ '';
+ preStop =
+ ''
+ ${pkgs.libvirt}/bin/virsh shutdown '${name}'
+ let "timeout = $(date +%s) + 10"
+ while [ "$(${pkgs.libvirt}/bin/virsh list --name | grep --count '^${name}$')" -gt 0 ]; do
+ if [ "$(date +%s)" -ge "$timeout" ]; then
+ # Meh, we warned it...
+ ${pkgs.libvirt}/bin/virsh destroy '${name}'
+ else
+ # The machine is still running, let's give it some time to shut down
+ sleep 0.5
+ fi
+ done
+ '';
+ }) guests;
+
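+ # Base qcow2 template built by image.nix; new guest volumes are seeded from it.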
+ environment.etc."virt/base-images/baseline.qcow2".source = "${import ./image.nix args}/baseline.qcow2";
+
+}
diff --git a/hosts/fw.cloonar.com/modules/hypervisor/image.nix b/hosts/fw.cloonar.com/modules/hypervisor/image.nix
new file mode 100644
index 0000000..74ec921
--- /dev/null
+++ b/hosts/fw.cloonar.com/modules/hypervisor/image.nix
@@ -0,0 +1,98 @@
+{ pkgs ? import <nixpkgs> {}, system ? builtins.currentSystem, ... }:
+let
+ config = (import <nixpkgs/nixos/lib/eval-config.nix> {
+ inherit system;
+ modules = [ {
+ imports = [ ./baseline-qemu.nix ];
+
+ # The template image should be as small as possible, but the deployed disk may be
+ # any size, so the partition and filesystem are grown to fill it on first boot.
+ systemd.services.resize-main-fs = {
+ wantedBy = [ "multi-user.target" ];
+ serviceConfig.Type = "oneshot";
+ script =
+ ''
+ # Resize main partition to fill whole disk
+ echo ", +" | ${pkgs.utillinux}/bin/sfdisk /dev/vda --no-reread -N 1
+ ${pkgs.parted}/bin/partprobe
+ # Resize filesystem
+ ${pkgs.e2fsprogs}/bin/resize2fs /dev/vda1
+ '';
+ };
+ } ];
+ }).config;
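+# Build the template inside a throwaway VM: partition and format a 1 GiB disk, copy the
+# closure of the system above into it, register it in the Nix database, install the boot
+# loader, and finally compress the result to $out/baseline.qcow2.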
+in pkgs.vmTools.runInLinuxVM (
+ pkgs.runCommand "nixos-sun-baseline-image"
+ {
+ memSize = 768;
+ preVM =
+ ''
+ mkdir $out
+ diskImage=image.qcow2
+ ${pkgs.vmTools.qemu}/bin/qemu-img create -f qcow2 $diskImage 1G
+ mv closure xchg/
+ '';
+ postVM =
+ ''
+ echo compressing VM image...
+ ${pkgs.vmTools.qemu}/bin/qemu-img convert -c $diskImage -O qcow2 $out/baseline.qcow2
+ '';
+ buildInputs = [ pkgs.utillinux pkgs.perl pkgs.parted pkgs.e2fsprogs ];
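+ # Export the closure of the built system so it can be copied into the image from inside the VM.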
+ exportReferencesGraph =
+ [ "closure" config.system.build.toplevel ];
+ }
+ ''
+ # Create the partition
+ parted /dev/vda mklabel msdos
+ parted /dev/vda -- mkpart primary ext4 1M -1s
+ . /sys/class/block/vda1/uevent
+ mknod /dev/vda1 b $MAJOR $MINOR
+
+ # Format the partition
+ mkfs.ext4 -L nixos /dev/vda1
+ mkdir /mnt
+ mount /dev/vda1 /mnt
+
+ for dir in dev proc sys; do
+ mkdir /mnt/$dir
+ mount --bind /$dir /mnt/$dir
+ done
+
+ storePaths=$(perl ${pkgs.pathsFromGraph} /tmp/xchg/closure)
+ echo filling Nix store...
+ mkdir -p /mnt/nix/store
+ set -f
+ cp -prd $storePaths /mnt/nix/store
+ # The permissions will be set up incorrectly if the host machine is not running NixOS
+ chown -R 0:30000 /mnt/nix/store
+
+ mkdir -p /mnt/etc/nix
+ echo 'build-users-group = ' > /mnt/etc/nix/nix.conf
+
+ # at least since nix-2.3.16 we need a root user in the chroot
+ # or else `nix-store --load-db` will fail with "cannot figure out user name"
+ chroot /mnt ${pkgs.shadow}/bin/useradd -u 0 root
+
+ # Register the paths in the Nix database.
+ printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
+ chroot /mnt ${config.nix.package.out}/bin/nix-store --load-db
+
+ # Create the system profile to allow nixos-rebuild to work.
+ chroot /mnt ${config.nix.package.out}/bin/nix-env \
+ -p /nix/var/nix/profiles/system --set ${config.system.build.toplevel}
+
+ # `nixos-rebuild' requires an /etc/NIXOS.
+ mkdir -p /mnt/etc/nixos
+ touch /mnt/etc/NIXOS
+
+ # `switch-to-configuration' requires a /bin/sh
+ mkdir -p /mnt/bin
+ ln -s ${config.system.build.binsh}/bin/sh /mnt/bin/sh
+
+ # Generate the GRUB menu.
+ chroot /mnt ${config.system.build.toplevel}/bin/switch-to-configuration boot
+
+ umount /mnt/{proc,dev,sys}
+ umount /mnt
+ ''
+)