Compare commits

...

10 Commits

Author SHA1 Message Date
Markus Schmidl d413aeb5c0 use unevaluatedNixosConfigurations instead 2023-11-26 02:51:36 +01:00
Markus Schmidl bd1e6d73c9 more more things into registry 2023-11-26 02:38:52 +01:00
Markus Schmidl 4f916eba66 make nix things fast. a lot of magic involved 2023-11-26 01:22:19 +01:00
Markus Schmidl 38185e5ac1 remove module check 2023-11-25 23:51:08 +01:00
Markus Schmidl ed97405de6 remove obsolete options 2023-11-25 23:10:37 +01:00
Markus Schmidl 61cc624a30 move more traffic-stop-box config to registry 2023-11-25 23:08:05 +01:00
Markus Schmidl 2b3d6cb7d1 start to move traffic-stop-boxes to registry 2023-11-25 22:59:51 +01:00
Markus Schmidl 0b67c90d2a move data-hoarder postgres to registry 2023-11-25 21:39:32 +01:00
Markus Schmidl a278d40551 remove old group 2023-11-25 21:30:26 +01:00
Markus Schmidl 0c8b910ec5 start implementing data-hoarder registry 2023-11-25 21:21:31 +01:00
56 changed files with 426 additions and 414 deletions

114
flake.nix
View File

@ -129,6 +129,7 @@
pkgs = nixpkgs.legacyPackages."x86_64-linux";
lib = pkgs.lib;
registry = import ./registry;
data-hoarder-modules = [
./modules/data-hoarder
@ -168,19 +169,15 @@
];
# function that generates a system with the given number
generate_system = (id: arch: monitoring:
generate_system = (id:
let
myRegistry = registry.traffic-stop-box."${toString id}";
in
{
"traffic-stop-box-${toString id}" = nixpkgs.lib.nixosSystem
{
system = arch;
specialArgs = inputs;
"${myRegistry.hostName}" = {
system = myRegistry.arch;
specialArgs = { inherit self inputs; registry = myRegistry; };
modules =
let
monitoring-mod =
if monitoring
then { deployment-TLMS.monitoring.enable = true; }
else { deployment-TLMS.monitoring.enable = false; };
in
[
# box-specific config
./hosts/traffic-stop-box/${toString id}
@ -190,87 +187,41 @@
./modules/traffic-stop-box
./modules/TLMS
{
deployment-TLMS.systemNumber = id;
deployment-TLMS.monitoring.enable = myRegistry.monitoring;
}
monitoring-mod
] ++ stop-box-modules;
};
}
);
id_list = [
{
# Barkhausen Bau
id = 0;
arch = "x86_64-linux";
monitoring = true;
}
{
# Zentralwerk
id = 1;
arch = "x86_64-linux";
monitoring = true;
}
{
# Wundstr. 9
id = 4;
arch = "x86_64-linux";
monitoring = true;
}
{
# Hannover Bredero Hochhaus City
id = 8;
arch = "aarch64-linux";
monitoring = false;
}
{
# Hannover Bredero Hochhaus Wider Area
id = 9;
arch = "aarch64-linux";
monitoring = false;
}
];
# list of traffic-stop-box-$id that will be built
stop_box_ids = [ 0 1 4 8 9 ];
# attribute set of all traffic stop boxes
stop_boxes = nixpkgs.lib.foldl (x: y: nixpkgs.lib.mergeAttrs x (generate_system y.id y.arch y.monitoring)) { } id_list;
r09_receivers = nixpkgs.lib.foldl (x: id: nixpkgs.lib.mergeAttrs x (generate_system id)) { } stop_box_ids;
packages = {
staging-microvm = self.nixosConfigurations.staging-data-hoarder.config.microvm.declaredRunner;
data-hoarder-microvm = self.nixosConfigurations.data-hoarder.config.microvm.declaredRunner;
}
// (import ./pkgs/deployment.nix { inherit self pkgs lib; })
// (lib.foldl (x: y: lib.mergeAttrs x { "${y.config.system.name}-vm" = y.config.system.build.vm; }) { } (lib.attrValues self.nixosConfigurations));
in
{
packages."aarch64-linux".box8 = self.nixosConfigurations.traffic-stop-box-8.config.system.build.sdImage;
packages."aarch64-linux".box9 = self.nixosConfigurations.traffic-stop-box-9.config.system.build.sdImage;
packages."x86_64-linux" = packages;
nixosConfigurations = stop_boxes // {
data-hoarder = nixpkgs.lib.nixosSystem {
unevaluatedNixosConfigurations = r09_receivers // {
data-hoarder = {
system = "x86_64-linux";
specialArgs = { inherit inputs self; };
specialArgs = { inherit inputs self; registry = registry.data-hoarder; };
modules = [
microvm.nixosModules.microvm
./hosts/data-hoarder
] ++ data-hoarder-modules;
};
staging-data-hoarder = nixpkgs.lib.nixosSystem {
staging-data-hoarder = {
system = "x86_64-linux";
specialArgs = { inherit inputs self; };
specialArgs = { inherit inputs self; registry = registry.staging-data-hoarder; };
modules = [
./hosts/staging-data-hoarder
microvm.nixosModules.microvm
] ++ data-hoarder-modules;
};
notice-me-senpai = nixpkgs.lib.nixosSystem {
notice-me-senpai = {
system = "x86_64-linux";
specialArgs = { inherit inputs self; };
specialArgs = { inherit inputs self; registry = registry.notice-me-senpai; };
modules = [
sops-nix.nixosModules.sops
./modules/TLMS
@ -278,9 +229,9 @@
];
};
tram-borzoi = nixpkgs.lib.nixosSystem {
tram-borzoi = {
system = "x86_64-linux";
specialArgs = { inherit inputs self; };
specialArgs = { inherit inputs self; registry = registry.tram-borzoi; };
modules = [
sops-nix.nixosModules.sops
microvm.nixosModules.microvm
@ -293,20 +244,35 @@
];
};
uranus = nixpkgs.lib.nixosSystem {
uranus = {
system = "x86_64-linux";
specialArgs = { inherit inputs self; };
specialArgs = { inherit inputs self; registry = registry.uranus; };
modules = [
sops-nix.nixosModules.sops
microvm.nixosModules.microvm
./modules/TLMS
./hosts/uranus
{ deployment-TLMS.monitoring.enable = true; }
];
};
};
in
{
inherit unevaluatedNixosConfigurations;
packages."aarch64-linux".box8 = self.nixosConfigurations.traffic-stop-box-8.config.system.build.sdImage;
packages."aarch64-linux".box9 = self.nixosConfigurations.traffic-stop-box-9.config.system.build.sdImage;
packages."x86_64-linux" = {
staging-microvm = self.nixosConfigurations.staging-data-hoarder.config.microvm.declaredRunner;
data-hoarder-microvm = self.nixosConfigurations.data-hoarder.config.microvm.declaredRunner;
};
# these are in the app declaration as nix before 2.19 tries to find attrPaths in packages first.
# here we evaluate over all nixos configurations making this extremely slow
apps."x86_64-linux" = (import ./pkgs/deployment.nix { inherit self pkgs lib; });
nixosConfigurations = lib.attrsets.mapAttrs (name: value: nixpkgs.lib.nixosSystem value) unevaluatedNixosConfigurations;
hydraJobs =
let

View File

@ -1,19 +1,12 @@
{ config, ... }:
let
port = 51820;
in
{ config, registry, ... }:
{
boot.kernel.sysctl."net.ipv4.ip_forward" = 1;
networking.firewall.allowedUDPPorts = [ port ];
networking.firewall.allowedUDPPorts = [ registry.publicWireguardEndpoint.port ];
deployment-TLMS.net.wg = {
ownEndpoint.host = "endpoint.tlm.solutions";
ownEndpoint.port = port;
addr4 = "10.13.37.1";
prefix4 = 24;
privateKeyFile = config.sops.secrets.wg-seckey.path;
publicKey = "WDvCObJ0WgCCZ0ORV2q4sdXblBd8pOPZBmeWr97yphY=";
extraPeers = [
{
# Tassilo

View File

@ -1,8 +1,6 @@
{ self, pkgs, lib, ... }: {
sops.defaultSopsFile = self + /secrets/notice-me-senpai/secrets.yaml;
networking.hostName = "notice-me-senpai";
boot = {
tmp.cleanOnBoot = true;
kernelPackages = pkgs.linuxPackages_latest;

View File

@ -1,4 +1,4 @@
{ config, lib, self, ... }:
{ config, lib, self, registry, ... }:
let
grafana_host = "grafana.tlm.solutions";
in
@ -18,7 +18,7 @@ in
{
enable = true;
port = 9501;
listenAddress = config.deployment-TLMS.net.wg.addr4;
listenAddress = registry.wgAddr4;
globalConfig = {
scrape_interval = "131s";
};
@ -26,7 +26,7 @@ in
let
### Autogenerate prometheus scraper config
# currently only wireguard-connected machines are getting scraped.
filterWgHosts = k: v: !(builtins.isNull v.config.deployment-TLMS.net.wg.addr4);
filterWgHosts = k: v: !(builtins.isNull v._module.specialArgs.registry.wgAddr4);
wgHosts = lib.filterAttrs filterWgHosts self.nixosConfigurations;
# collect active prometheus exporters
@ -38,7 +38,7 @@ in
job_name = "${hostname}_${exporter}";
static_configs =
let
ip = wgHosts."${hostname}".config.deployment-TLMS.net.wg.addr4;
ip = wgHosts."${hostname}"._module.specialArgs.registry.wgAddr4;
in
[{
targets = [ "${ip}:${toString exporter-cfg.port}" ];
@ -81,7 +81,7 @@ in
loki = {
enable = true;
configuration = {
server.http_listen_port = 3100;
server.http_listen_port = registry.port-loki;
auth_enabled = false;
ingester = {

View File

@ -5,9 +5,7 @@
networking.wireguard.enable = true;
deployment-TLMS.net.wg = {
addr4 = "10.13.37.200";
prefix4 = 24;
privateKeyFile = config.sops.secrets.wg-seckey.path;
publicKey = "z2E9TjL9nn0uuLmyQexqddE6g8peB5ENyf0LxpMolD4=";
};
}

View File

@ -42,8 +42,6 @@ in
}];
};
networking.hostName = "staging-data-hoarder";
time.timeZone = "Europe/Berlin";
networking.useNetworkd = true;
@ -70,10 +68,8 @@ in
};
wg = {
addr4 = "10.13.37.5";
prefix4 = 24;
privateKeyFile = config.sops.secrets.wg-seckey.path;
publicKey = "48hc7DVnUh2DHYhrxrNtNzj05MRecJO52j2niPImvkU=";
};
};

View File

@ -3,7 +3,5 @@
"${self}/hardware/dell-wyse-3040.nix"
];
deployment-TLMS.net.wg.publicKey = "qyStvzZdoqcjJJQckw4ZwvsQUa+8TBWtnsRxURqanno=";
TLMS.telegramDecoder.errorCorrection = false;
}

View File

@ -3,7 +3,5 @@
"${self}/hardware/dell-wyse-3040.nix"
];
deployment-TLMS.net.wg.publicKey = "dOPobdvfphx0EHmU7dd5ihslFzZi17XgRDQLMIUYa1w=";
TLMS.telegramDecoder.errorCorrection = false;
}

View File

@ -2,6 +2,4 @@
imports = [
"${self}/hardware/dell-wyse-3040.nix"
];
deployment-TLMS.net.wg.publicKey = "dL9JGsBhaTOmXgGEH/N/GCHbQgVHEjBvIMaRtCsHBHw=";
}

View File

@ -2,6 +2,4 @@
imports = [
"${self}/hardware/dell-wyse-3040.nix"
];
deployment-TLMS.net.wg.publicKey = "4TUQCToGNhjsCgV9elYE/91Vd/RvMgvMXtF/1Dzlvxo=";
}

View File

@ -2,6 +2,4 @@
imports = [
"${self}/hardware/rpi-3b-4b.nix"
];
deployment-TLMS.net.wg.publicKey = "w3AT3EahW1sCK8ZsR7sDTcQj1McXYeWx7fnfQFA7i3o=";
}

View File

@ -23,7 +23,5 @@ let eth = "enp1s0"; in
];
};
deployment-TLMS.net.wg.publicKey = "B0wPH0jUxaatRncHMkgDEQ+DzvlbTBrVJY4etxqQgG8=";
TLMS.telegramDecoder.errorCorrection = false;
}

View File

@ -10,6 +10,4 @@
"sk-ssh-ed25519@openssh.com aaaagnnrlxnzac1lzdi1nte5qg9wzw5zc2guy29taaaaili3ylty7fwvohtwx8511v+gbtlzzmuv505fi1pj53v6aaaabhnzado="
"sk-ssh-ed25519@openssh.com aaaagnnrlxnzac1lzdi1nte5qg9wzw5zc2guy29taaaaipzbd00cbfpxzuc8eb6sljaafnf1hgs6vci1rzcncyocaaaabhnzado="
];
deployment-TLMS.net.wg.publicKey = "NuLDNmxuHHzDXJSIOPSoihEhLWjARRtavuQvWirNR2I=";
}

View File

@ -10,6 +10,4 @@
"sk-ssh-ed25519@openssh.com aaaagnnrlxnzac1lzdi1nte5qg9wzw5zc2guy29taaaaili3ylty7fwvohtwx8511v+gbtlzzmuv505fi1pj53v6aaaabhnzado="
"sk-ssh-ed25519@openssh.com aaaagnnrlxnzac1lzdi1nte5qg9wzw5zc2guy29taaaaipzbd00cbfpxzuc8eb6sljaafnf1hgs6vci1rzcncyocaaaabhnzado="
];
deployment-TLMS.net.wg.publicKey = "sMsdY7dSjlYeIFMqjkh4pJ/ftAYXlyRuxDGbdnGLpEQ=";
}

View File

@ -9,6 +9,4 @@
users.users.root.openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJO/8PRzEqW20vnADv5xJrV5AlQ9bS8251AyQACyFMz+ dumbdvb_clarity"
];
deployment-TLMS.net.wg.publicKey = "dL9JGsBhaTOmXgGEH/N/GCHbQgVHEjBvIMaRtCsHBHw=";
}

View File

@ -9,6 +9,4 @@
users.users.root.openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJO/8PRzEqW20vnADv5xJrV5AlQ9bS8251AyQACyFMz+ dumbdvb_clarity"
];
deployment-TLMS.net.wg.publicKey = "j2hGr2rVv7T9kJE15c2IFWjmk0dXuJPev2BXiHZUKk8=";
}

View File

@ -46,13 +46,10 @@ in
}];
};
networking.hostName = "tram-borzoi";
time.timeZone = "Europe/Berlin";
networking.useNetworkd = true;
sops.defaultSopsFile = ../../secrets/tram-borzoi/secrets.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
@ -79,10 +76,8 @@ in
};
wg = {
addr4 = "10.13.37.8";
prefix4 = 24;
privateKeyFile = config.sops.secrets.wg-seckey.path;
publicKey = "wCW+r5kAaIarvZUWf4KsJNetyHobP0nNy5QOhqmsCCs=";
};
};

View File

@ -14,7 +14,7 @@
port = 5432;
authentication =
let
senpai-ip = self.nixosConfigurations.notice-me-senpai.config.deployment-TLMS.net.wg.addr4;
senpai-ip = self.unevaluatedNixosConfigurations.notice-me-senpai.specialArgs.registry.wgAddr4;
# TODO: fixme
uranus-ip = "10.13.37.9";
in

View File

@ -49,8 +49,6 @@ in
];
};
networking.hostName = "uranus";
time.timeZone = "Europe/Berlin";
networking.useNetworkd = true;
@ -82,10 +80,8 @@ in
};
wg = {
addr4 = "10.13.37.9";
prefix4 = 24;
privateKeyFile = config.sops.secrets.wg-seckey.path;
publicKey = "KwCG5CWPdNmrjEOYJYD2w0yhzoWpYHrjGbstdT5+pFk=";
};
};

View File

@ -1,4 +1,4 @@
{ pkgs, config, lib, ... }:
{ pkgs, config, lib, registry, ... }:
let
regMotd = ''
_._ _,-'""`-._
@ -31,6 +31,8 @@ in
networking.useNetworkd = true;
networking.hostName = registry.hostName;
console = {
font = "Lat2-Terminus16";
keyMap = "uk";

View File

@ -1,12 +1,6 @@
{ lib, ... }:
with lib; {
options = {
deployment-TLMS.systemNumber = mkOption {
type = types.int;
default = 0;
description = "number of the system";
};
deployment-TLMS.domain = mkOption {
type = types.str;
default = "tlm.solutions";

View File

@ -1,7 +1,7 @@
{ lib, config, self, ... }:
{ lib, config, self, registry, ... }:
let
cfg = config.deployment-TLMS.monitoring;
monitoring-host = self.nixosConfigurations.notice-me-senpai.config;
monitoring-host-registry = self.unevaluatedNixosConfigurations.notice-me-senpai.specialArgs.registry;
in
{
options.deployment-TLMS.monitoring = with lib; {
@ -32,7 +32,7 @@ in
config =
let
wg-addr-pred = lib.assertMsg (!(isNull config.deployment-TLMS.net.wg.addr4)) "to add system to monitoring, add it to TLMS wireguard first!";
wg-addr-pred = lib.assertMsg (registry ? wgAddr4) "to add system to monitoring, add it to TLMS wireguard first!";
in
lib.mkIf (cfg.enable && wg-addr-pred) {
# prometheus node exporter
@ -40,7 +40,7 @@ in
node = {
enable = true;
port = cfg.node-exporter.port;
listenAddress = config.deployment-TLMS.net.wg.addr4;
listenAddress = registry.wgAddr4;
enabledCollectors = [
"systemd"
];
@ -59,7 +59,7 @@ in
filename = "/tmp/positions.yaml";
};
clients = [{
url = "http://${monitoring-host.deployment-TLMS.net.wg.addr4}:${toString monitoring-host.services.loki.configuration.server.http_listen_port}/loki/api/v1/push";
url = "http://${monitoring-host-registry.wgAddr4}:${toString monitoring-host-registry.port-loki}/loki/api/v1/push";
}];
scrape_configs = [{
job_name = "journal";

View File

@ -1,32 +1,13 @@
{ lib, config, self, ... }:
{ lib, config, self, registry, ... }:
let
cfg = config.deployment-TLMS.net.wg;
in
{
options.deployment-TLMS.net.wg = with lib; {
ownEndpoint.host = mkOption {
type = types.nullOr types.str;
default = null;
};
ownEndpoint.port = mkOption {
type = types.port;
default = 51820;
};
publicKey = mkOption {
type = types.str;
default = "";
description = "own public key";
};
privateKeyFile = mkOption {
type = types.either types.str types.path;
};
addr4 = mkOption {
type = types.nullOr types.str;
default = null;
};
prefix4 = mkOption {
type = types.int;
default = 24;
@ -55,35 +36,35 @@ in
keepalive = 25;
# helpers
peer-systems = (lib.filter (x: (x.config.deployment-TLMS.net.wg.addr4 != cfg.addr4) && (!isNull x.config.deployment-TLMS.net.wg.addr4))
(lib.attrValues self.nixosConfigurations));
registries = builtins.attrValues (builtins.mapAttrs (name: value: value.specialArgs.registry) self.unevaluatedNixosConfigurations);
endpoint =
# find all other systems registries with wireguard
peerSystemRegistries = (lib.filter (x: (x.wgAddr4 != registry.wgAddr4) && (!isNull x.wgAddr4)) registries);
# find all endpoint registries
endpointRegistries =
let
ep = (lib.filter
(x:
x.config.deployment-TLMS.net.wg.addr4 != cfg.addr4
&& (!isNull x.config.deployment-TLMS.net.wg.ownEndpoint.host))
(lib.attrValues self.nixosConfigurations));
(x: x.wgAddr4 != registry.wgAddr4 && (!isNull x.publicWireguardEndpoint)) registries);
in
assert lib.assertMsg (lib.length ep == 1) "there should be exactly one endpoint"; ep;
peers = map
(x: {
wireguardPeerConfig = {
PublicKey = x.config.deployment-TLMS.net.wg.publicKey;
AllowedIPs = [ "${x.config.deployment-TLMS.net.wg.addr4}/32" ];
PublicKey = x.wireguardPublicKey;
AllowedIPs = [ "${x.wgAddr4}/32" ];
PersistentKeepalive = keepalive;
};
})
peer-systems;
peerSystemRegistries;
ep = [{
wireguardPeerConfig =
let x = lib.elemAt endpoint 0; in {
PublicKey = x.config.deployment-TLMS.net.wg.publicKey;
AllowedIPs = [ "${x.config.deployment-TLMS.net.wg.addr4}/${toString cfg.prefix4}" ];
Endpoint = with x.config.deployment-TLMS.net.wg.ownEndpoint; "${host}:${toString port}";
let x = lib.elemAt endpointRegistries 0; in {
PublicKey = x.wireguardPublicKey;
AllowedIPs = [ "${x.wgAddr4}/${toString cfg.prefix4}" ];
Endpoint = with x.publicWireguardEndpoint; "${host}:${toString port}";
PersistentKeepalive = keepalive;
};
}];
@ -98,7 +79,7 @@ in
dvbwg-wireguard = {
PrivateKeyFile = cfg.privateKeyFile;
} //
(if !isNull cfg.ownEndpoint.host then { ListenPort = cfg.ownEndpoint.port; } else { });
(if !isNull registry.publicWireguardEndpoint then { ListenPort = registry.publicWireguardEndpoint.port; } else { });
expeers = map
(x: {
@ -110,9 +91,9 @@ in
})
cfg.extraPeers;
peerconf = if isNull cfg.ownEndpoint.host then ep else (peers ++ expeers);
peerconf = if isNull registry.publicWireguardEndpoint then ep else (peers ++ expeers);
in
lib.mkIf (!isNull cfg.addr4) {
lib.mkIf (registry ? wgAddr4) {
networking.wireguard.enable = true;
networking.firewall.trustedInterfaces = [ dvbwg-name ];
@ -125,7 +106,7 @@ in
systemd.network.networks."30-${dvbwg-name}" = {
matchConfig.Name = dvbwg-name;
networkConfig = {
Address = "${cfg.addr4}/${toString cfg.prefix4}";
Address = "${registry.wgAddr4}/${toString cfg.prefix4}";
};
};
};

View File

@ -1,25 +1,15 @@
{ config, ... }:
let
service_number = 6;
in
{
{ config, registry, ... }: {
TLMS.bureaucrat = {
enable = true;
grpc = {
host = "127.0.0.1";
port = 50050 + service_number;
};
redis = {
host = config.services.redis.servers."state".bind;
port = config.services.redis.servers."state".port;
};
grpc = registry.grpc-chemo-bureaucrat;
redis = registry.redis-bureaucrat-lizard;
};
services = {
redis.servers."state" = {
redis.servers."state" = with registry.redis-bureaucrat-lizard; {
inherit port;
enable = true;
bind = "127.0.0.1";
port = 5314;
bind = host;
};
};
}

View File

@ -1,29 +1,16 @@
{ config, ... }:
let
service_number = 3;
in
{
{ config, registry, ... }: {
TLMS.chemo = {
inherit (registry.grpc-data_accumulator-chemo) host port;
enable = true;
host = "127.0.0.1";
port = 50050 + service_number;
database = {
host = "127.0.0.1";
port = config.services.postgresql.port;
passwordFile = config.sops.secrets.postgres_password.path;
user = "tlms";
database = "tlms";
};
database = registry.postgres;
GRPC = [
{
inherit (registry.grpc-chemo-bureaucrat) host port;
name = "BUREAUCRAT";
host = config.TLMS.bureaucrat.grpc.host;
port = config.TLMS.bureaucrat.grpc.port;
}
{
inherit (registry.grpc-chemo-funnel) host port;
name = "FUNNEL";
host = config.TLMS.funnel.GRPC.host;
port = config.TLMS.funnel.GRPC.port;
}
];
};

View File

@ -1,23 +1,12 @@
{ config, ... }:
{
{ config, registry, ... }: {
TLMS.dataAccumulator = {
inherit (registry.port-data_accumulator) host port;
enable = true;
host = "0.0.0.0";
port = 8080;
database = {
host = "127.0.0.1";
port = config.services.postgresql.port;
passwordFile = config.sops.secrets.postgres_password.path;
user = "tlms";
database = "tlms";
};
GRPC = [
{
name = "CHEMO";
host = config.TLMS.chemo.host;
port = config.TLMS.chemo.port;
}
];
database = registry.postgres;
GRPC = [{
inherit (registry.grpc-data_accumulator-chemo) host port;
name = "CHEMO";
}];
};
systemd.services."data-accumulator" = {
after = [ "postgresql.service" ];
@ -29,7 +18,10 @@
enable = true;
recommendedProxySettings = true;
virtualHosts = {
"dump.${(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ] config.deployment-TLMS.domain)}" = {
"dump.${
(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ]
config.deployment-TLMS.domain)
}" = {
enableACME = true;
forceSSL = true;
extraConfig = ''
@ -41,7 +33,8 @@
enableACME = true;
locations = {
"/" = {
proxyPass = with config.TLMS.dataAccumulator; "http://${host}:${toString port}/";
proxyPass = with registry.port-data_accumulator;
"http://${host}:${toString port}/";
};
};
};

View File

@ -1,17 +1,8 @@
{ config, ... }: {
{ config, registry, ... }: {
TLMS.datacare = {
enable = true;
http = {
host = "127.0.0.1";
port = 8070;
};
database = {
host = "127.0.0.1";
port = config.services.postgresql.port;
user = "tlms";
database = "tlms";
passwordFile = config.sops.secrets.postgres_password.path;
};
http = registry.port-datacare;
database = registry.postgres;
allowedIpsExport = [ "10.13.37.0/24" ];
saltFile = config.sops.secrets.postgres_password_hash_salt.path;
user = "datacare";
@ -22,13 +13,15 @@
wants = [ "postgresql.service" ];
};
services = {
nginx = {
enable = true;
recommendedProxySettings = true;
virtualHosts = {
"datacare.${(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ] config.deployment-TLMS.domain)}" = {
"datacare.${
(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ]
config.deployment-TLMS.domain)
}" = {
enableACME = true;
forceSSL = true;
extraConfig = ''
@ -40,7 +33,8 @@
enableACME = true;
locations = {
"/" = {
proxyPass = with config.TLMS.datacare.http; "http://${host}:${toString port}/";
proxyPass = with registry.port-data_accumulator;
"http://${host}:${toString port}/";
proxyWebsockets = true;
extraConfig = ''
more_set_headers "Access-Control-Allow-Credentials: true";

View File

@ -1,11 +1,13 @@
{ pkgs, config, ... }:
{
{ pkgs, config, ... }: {
services = {
nginx = {
enable = true;
recommendedProxySettings = true;
virtualHosts = {
"docs.${(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ] config.deployment-TLMS.domain)}" = {
"docs.${
(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ]
config.deployment-TLMS.domain)
}" = {
enableACME = true;
forceSSL = true;
extraConfig = ''

View File

@ -4,7 +4,10 @@
enable = true;
recommendedProxySettings = true;
virtualHosts = {
"files.${(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ] config.deployment-TLMS.domain)}" = {
"files.${
(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ]
config.deployment-TLMS.domain)
}" = {
enableACME = true;
forceSSL = true;
extraConfig = ''

View File

@ -2,7 +2,10 @@
services.nginx = {
enable = true;
virtualHosts = {
"kid.${(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ] config.deployment-TLMS.domain)}" = {
"kid.${
(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ]
config.deployment-TLMS.domain)
}" = {
enableACME = true;
forceSSL = true;
extraConfig = ''
@ -13,7 +16,10 @@
enableACME = true;
forceSSL = true;
locations."~ ^/(de|en)" = {
root = if (config.deployment-TLMS.domain == "tlm.solutions") then "${pkgs.kindergarten}" else "${pkgs.kindergarten-staging}";
root = if (config.deployment-TLMS.domain == "tlm.solutions") then
"${pkgs.kindergarten}"
else
"${pkgs.kindergarten-staging}";
# index = "index.html";
tryFiles = "$uri /$1/index.html =404";
extraConfig = ''

View File

@ -1,34 +1,22 @@
{ config, ... }:
let
service_number = 1;
in
{
{ config, registry, ... }: {
TLMS.lizard = {
enable = true;
http = {
host = "127.0.0.1";
port = 9000 + service_number;
};
http = { inherit (registry.port-lizard) host port; };
redis = {
host = config.services.redis.servers."state".bind;
port = config.services.redis.servers."state".port;
};
redis = registry.redis-bureaucrat-lizard;
logLevel = "debug";
workerCount = 6;
};
services = {
redis.servers."state" = {
enable = true;
bind = "127.0.0.1";
port = 5314;
};
nginx = {
enable = true;
recommendedProxySettings = true;
virtualHosts = {
"lizard.${(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ] config.deployment-TLMS.domain)}" = {
"lizard.${
(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ]
config.deployment-TLMS.domain)
}" = {
enableACME = true;
forceSSL = true;
extraConfig = ''
@ -40,7 +28,8 @@ in
enableACME = true;
locations = {
"/" = {
proxyPass = with config.TLMS.lizard.http; "http://${host}:${toString port}/";
proxyPass = with registry.port-lizard;
"http://${host}:${toString port}/";
proxyWebsockets = true;
};
};

View File

@ -4,7 +4,10 @@
enable = true;
recommendedProxySettings = true;
virtualHosts = {
"map.${(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ] config.deployment-TLMS.domain)}" = {
"map.${
(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ]
config.deployment-TLMS.domain)
}" = {
enableACME = true;
forceSSL = true;
extraConfig = ''

View File

@ -20,8 +20,7 @@ let
# STS
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
'';
in
{
in {
networking.firewall.allowedTCPPorts = [ 80 443 ];
security.acme.acceptTerms = true;

View File

@ -1,25 +1,22 @@
{ lib, pkgs, config, inputs, self, ... }: {
{ lib, pkgs, config, inputs, self, registry, ... }: {
services.postgresql = {
inherit (registry.postgres) port;
enable = true;
enableTCPIP = true;
port = 5432;
authentication =
let
senpai-ip = self.nixosConfigurations.notice-me-senpai.config.deployment-TLMS.net.wg.addr4;
in
pkgs.lib.mkOverride 10 ''
local all all trust
host all all 127.0.0.1/32 trust
host all all ::1/128 trust
host tlms grafana ${senpai-ip}/32 scram-sha-256
'';
authentication = let
senpai-ip =
self.unevaluatedNixosConfigurations.notice-me-senpai.specialArgs.registry.wgAddr4;
in pkgs.lib.mkOverride 10 ''
local all all trust
host all all 127.0.0.1/32 trust
host all all ::1/128 trust
host tlms grafana ${senpai-ip}/32 scram-sha-256
'';
package = pkgs.postgresql_14;
ensureDatabases = [ "tlms" ];
ensureUsers = [
{
name = "grafana";
}
{ name = "grafana"; }
{
name = "tlms";
ensurePermissions = {
@ -30,15 +27,12 @@
];
};
environment.systemPackages = [ inputs.tlms-rs.packages.x86_64-linux.run-migration-based ];
environment.systemPackages =
[ inputs.tlms-rs.packages.x86_64-linux.run-migration-based ];
systemd.services.postgresql = {
unitConfig = {
TimeoutStartSec = 3000;
};
serviceConfig = {
TimeoutSec = lib.mkForce 3000;
};
unitConfig = { TimeoutStartSec = 3000; };
serviceConfig = { TimeoutSec = lib.mkForce 3000; };
postStart = lib.mkAfter ''
# set pw for the users
$PSQL -c "ALTER ROLE tlms WITH PASSWORD '$(cat ${config.sops.secrets.postgres_password.path})';"
@ -63,9 +57,7 @@
systemd.services.dump-csv = {
path = [ config.services.postgresql.package ];
serviceConfig = {
User = "postgres";
};
serviceConfig = { User = "postgres"; };
script = ''
TMPFILE=$(mktemp)
OUT_FOLDER=/var/lib/pub-files/postgres-dumps/$(date -d"$(date) - 1 day" +"%Y-%m")

View File

@ -4,14 +4,19 @@ let
data-accumulator-user = config.TLMS.dataAccumulator.user;
trekkie-user = config.TLMS.trekkie.user;
chemo-user = config.TLMS.chemo.user;
in
{
in {
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
users.groups = {
postgres-tlms = {
name = "postgres-tlms";
members = [ datacare-user data-accumulator-user trekkie-user chemo-user "postgres" ];
members = [
datacare-user
data-accumulator-user
trekkie-user
chemo-user
"postgres"
];
};
password-salt = {
@ -19,18 +24,10 @@ in
members = [ datacare-user trekkie-user "postgres" ];
};
#TODO: remove this the two databases got merged
postgres-telegrams = {
name = "postgres-telegrams";
members = [ datacare-user data-accumulator-user "postgres" ];
};
};
sops.secrets = {
wg-seckey = {
owner = config.users.users.systemd-network.name;
};
wg-seckey = { owner = config.users.users.systemd-network.name; };
postgres_password_hash_salt = {
group = config.users.groups.password-salt.name;
mode = "0440";

View File

@ -1,21 +1,11 @@
{ config, ... }:
let
service_number = 2;
in
{
{ config, registry, ... }: {
TLMS.funnel = {
enable = true;
GRPC = {
host = "127.0.0.1";
port = 50050 + service_number;
};
defaultWebsocket = {
host = "127.0.0.1";
port = 9000 + service_number;
};
GRPC = registry.grpc-chemo-funnel;
defaultWebsocket = { inherit (registry.port-funnel) host port; };
metrics = {
port = 10010 + service_number;
host = config.deployment-TLMS.net.wg.addr4;
inherit (registry.port-funnel-metrics) port;
host = registry.wgAddr4;
};
};
services = {
@ -23,12 +13,16 @@ in
enable = true;
recommendedProxySettings = true;
virtualHosts = {
"socket.${(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ] config.deployment-TLMS.domain)}" = {
"socket.${
(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ]
config.deployment-TLMS.domain)
}" = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyWebsockets = true;
proxyPass = with config.TLMS.funnel.defaultWebsocket; "http://${host}:${toString port}/";
proxyPass = with registry.port-funnel;
"http://${host}:${toString port}/";
};
};
"socket.${config.deployment-TLMS.domain}" = {
@ -36,7 +30,8 @@ in
enableACME = true;
locations = {
"/" = {
proxyPass = with config.TLMS.funnel.defaultWebsocket; "http://${host}:${toString port}/";
proxyPass = with registry.port-funnel;
"http://${host}:${toString port}/";
proxyWebsockets = true;
};
};

View File

@ -1,24 +1,11 @@
{ config, ... }:
{
{ config, registry, ... }: {
TLMS.trekkie = {
inherit (registry.port-trekkie) host port;
enable = true;
host = "0.0.0.0";
saltPath = config.sops.secrets.postgres_password_hash_salt.path;
port = 8060;
database = {
host = "127.0.0.1";
port = config.services.postgresql.port;
passwordFile = config.sops.secrets.postgres_password.path;
user = "tlms";
};
redis = {
port = 6379;
host = "localhost";
};
grpc = {
host = config.TLMS.chemo.host;
port = config.TLMS.chemo.port;
};
database = registry.postgres;
redis = registry.redis-trekkie;
grpc = registry.grpc-trekkie-chemo;
logLevel = "info";
};
systemd.services."trekkie" = {
@ -27,17 +14,20 @@
};
services = {
redis.servers."trekkie" = {
redis.servers."trekkie" = with registry.redis-trekkie; {
inherit port;
enable = true;
bind = config.TLMS.trekkie.redis.host;
port = config.TLMS.trekkie.redis.port;
bind = host;
};
nginx = {
enable = true;
recommendedProxySettings = true;
virtualHosts = {
"trekkie.${(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ] config.deployment-TLMS.domain)}" = {
"trekkie.${
(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ]
config.deployment-TLMS.domain)
}" = {
enableACME = true;
forceSSL = true;
extraConfig = ''
@ -49,7 +39,8 @@
enableACME = true;
locations = {
"/" = {
proxyPass = with config.TLMS.trekkie; "http://${host}:${toString port}/";
proxyPass = with registry.port-trekkie;
"http://${host}:${toString port}/";
};
};
};

View File

@ -2,13 +2,14 @@
services.nginx = {
enable = true;
virtualHosts = {
"${(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ] config.deployment-TLMS.domain)}" = {
enableACME = true;
forceSSL = true;
extraConfig = ''
rewrite ^ https://kid.${config.deployment-TLMS.domain}/ permanent;
'';
};
"${(builtins.replaceStrings [ "tlm.solutions" ] [ "dvb.solutions" ]
config.deployment-TLMS.domain)}" = {
enableACME = true;
forceSSL = true;
extraConfig = ''
rewrite ^ https://kid.${config.deployment-TLMS.domain}/ permanent;
'';
};
"${config.deployment-TLMS.domain}" = {
enableACME = true;
forceSSL = true;

View File

@ -1,10 +1,8 @@
{ pkgs, config, self, ... }:
{ pkgs, config, registry, ... }:
{
boot.tmp.useTmpfs = true;
networking.hostName = "traffic-stop-box-${toString config.deployment-TLMS.systemNumber}"; # Define your hostname.
# reboot 60 seconds after kernel panic
boot.kernel.sysctl."kernel.panic" = 60;

View File

@ -1,37 +1,13 @@
{ config, lib, self, ... }:
let
file = with config.deployment-TLMS; "${self}/hosts/traffic-stop-box/${toString systemNumber}/config_${toString systemNumber}.json";
receiver_configs = [
{ frequency = 170790000; offset = 20000; device = ""; RF = 0; IF = 0; BB = 32; } # dresden - barkhausen
{ frequency = 170790000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; } # dresden - zentralwerk
{ frequency = 153850000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; } # chemnitz
{ frequency = 170795000; offset = 19400; device = ""; RF = 14; IF = 32; BB = 42; } # dresden unused
{ frequency = 170790000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; } # dresden Wundstr. 9
{ frequency = 170790000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; } # dresden test box
{ frequency = 150827500; offset = 19550; device = ""; RF = 14; IF = 32; BB = 42; } # warpzone münster
{ frequency = 150827500; offset = 19550; device = ""; RF = 14; IF = 32; BB = 42; } # drehturm aachen
{ frequency = 150890000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; } # Hannover Bredero Hochhaus City
{ frequency = 152830000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; } # Hannover Bredero Hochaus Umland
{ frequency = 153850000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; } # CLT
];
receiver_config = lib.elemAt receiver_configs config.deployment-TLMS.systemNumber;
in
{ config, lib, registry, ... }:
{
TLMS.gnuradio = {
enable = true;
frequency = receiver_config.frequency;
offset = receiver_config.offset;
device = receiver_config.device;
RF = receiver_config.RF;
IF = receiver_config.IF;
BB = receiver_config.BB;
};
} // registry.gnuradio;
TLMS.telegramDecoder = {
enable = true;
server = [ "http://10.13.37.1:8080" "http://10.13.37.5:8080" "http://10.13.37.7:8080" ];
configFile = file;
configFile = registry.telegramDecoderConfig;
authTokenFile = config.sops.secrets.telegram-decoder-token.path;
};
}

View File

@ -1,6 +1,6 @@
{ config, self, ... }:
{ config, self, registry, ... }:
{
sops.defaultSopsFile = self + /secrets/traffic-stop-box-${toString config.deployment-TLMS.systemNumber}/secrets.yaml;
sops.defaultSopsFile = self + /secrets/${registry.hostName}/secrets.yaml;
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
sops.secrets.telegram-decoder-token.owner = config.users.users.telegram-decoder.name;

View File

@ -1,5 +1,5 @@
{ config, lib, ... }:
# pubkey of the box goes to hosts/traffic-stop-box/${id}.nix!
{ config, lib, registry, ... }:
# pubkey of the box goes to registry/traffic-stop-box/default.nix!
{
networking.useNetworkd = lib.mkForce true;
@ -8,7 +8,6 @@
};
deployment-TLMS.net.wg = {
addr4 = lib.mkDefault "10.13.37.${toString (config.deployment-TLMS.systemNumber + 100)}";
prefix4 = 24;
privateKeyFile = lib.mkDefault config.sops.secrets.wg-seckey.path;
};

View File

@ -11,10 +11,10 @@ let
# the deployment script
deployScriptTemplate = (system: command:
let
ip = system.config.deployment-TLMS.net.wg.addr4;
ip = system._module.specialArgs.registry.wgAddr4;
host = system.config.networking.hostName;
in
in
(pkgs.writeScriptBin "deploy" ''
#!${pkgs.runtimeShell}
set -e
@ -34,7 +34,7 @@ let
# garbage collect everything
garbageCollect = (system:
let
ip = system.config.deployment-TLMS.net.wg.addr4;
ip = system._module.specialArgs.registry.wgAddr4;
host = system.config.networking.hostName;
in
(pkgs.writeScriptBin "deploy" ''
@ -56,7 +56,7 @@ let
# reboot everything
reboot = (system:
let
ip = system.config.deployment-TLMS.net.wg.addr4;
ip = system._module.specialArgs.registry.wgAddr4;
host = system.config.networking.hostName;
in
(pkgs.writeScriptBin "deploy" ''
@ -77,11 +77,7 @@ let
''));
# individual script generation
deployScriptWriter = (command:
lib.mapAttrs'
(name: system:
lib.nameValuePair ("rebuild-" + command + "-" + name) (deployScriptTemplate system command))
nonVmHosts);
deployScriptWriter = (command: lib.mapAttrs' (name: system: lib.nameValuePair ("rebuild-" + command + "-" + name) (deployScriptTemplate system command)) nonVmHosts);
switchInstallScripts = deployScriptWriter "switch";
bootInstallScripts = deployScriptWriter "boot";
@ -98,21 +94,25 @@ let
garbageAll = lib.strings.concatMapStringsSep "\n" (path: "${path}/bin/deploy") (builtins.attrValues garbageCollectScripts);
nukeAll = lib.mapAttrs'
(name: scripts: lib.nameValuePair (name) (pkgs.writeScriptBin "${name}" ''
#!${pkgs.runtimeShell}
set -x
(name: scripts:
lib.nameValuePair (name) (pkgs.writeScriptBin "${name}" ''
#!${pkgs.runtimeShell}
set -x
${scripts}
''))
${scripts}
''))
{
rebuild-boot-all = bootAll;
rebuild-switch-all = switchAll;
reboot-all = rebootAll;
garbage-collect-all = garbageAll;
};
allPackages = installScripts // garbageCollectScripts // rebootScripts // nukeAll;
# rewrite to app definitions
in
installScripts //
garbageCollectScripts //
rebootScripts //
nukeAll
builtins.mapAttrs
(name: value: {
type = "app";
program = "${value}/bin/deploy";
})
allPackages

View File

@ -0,0 +1,63 @@
# Service-endpoint registry for the data-hoarder host.
#
# Every entry pairs a connect/bind `host` with a `port`. Naming convention
# (as used by the consumers visible in this repo's modules):
#   <kind>-<producer>[-<consumer>]   e.g. grpc-chemo-funnel = the gRPC
# channel chemo exposes towards funnel.
# `rec` so entries can alias each other (see grpc-trekkie-chemo below).
rec {
  # Redis instance shared by bureaucrat and lizard.
  redis-bureaucrat-lizard = {
    host = "127.0.0.1";
    port = 5314;
  };
  grpc-chemo-bureaucrat = {
    host = "127.0.0.1";
    port = 50056;
  };
  grpc-chemo-funnel = {
    host = "127.0.0.1";
    port = 50052;
  };
  grpc-data_accumulator-chemo = {
    host = "127.0.0.1";
    port = 50053;
  };
  # trekkie reuses data_accumulator's gRPC channel into chemo (alias, not a copy).
  grpc-trekkie-chemo = grpc-data_accumulator-chemo;
  # 0.0.0.0 binds listen beyond loopback; NOTE(review): presumably fronted by
  # the nginx vhosts configured elsewhere in this repo — confirm exposure is intended.
  port-data_accumulator = {
    host = "0.0.0.0";
    port = 8080;
  };
  port-datacare = {
    host = "127.0.0.1";
    port = 8070;
  };
  port-lizard = {
    host = "127.0.0.1";
    port = 9001;
  };
  port-funnel = {
    host = "127.0.0.1";
    port = 9002;
  };
  # Metrics port only — no `host` attr; NOTE(review): consumers must default
  # the bind address themselves, verify none reads `.host` here.
  port-funnel-metrics = { port = 10012; };
  port-trekkie = {
    host = "0.0.0.0";
    port = 8060;
  };
  redis-trekkie = {
    host = "localhost";
    port = 6379;
  };
  # Shared PostgreSQL connection parameters.
  postgres = {
    host = "127.0.0.1";
    user = "tlms";
    database = "tlms";
    port = 5432;
    # NOTE(review): runtime path — looks like a sops-rendered secret; confirm
    # every host importing this registry actually provisions it.
    passwordFile = "/run/secrets/postgres_password";
  };
}

37
registry/default.nix Normal file
View File

@ -0,0 +1,37 @@
# Host registry: per-machine metadata (WireGuard identity, tunnel address,
# public endpoint) handed to each NixOS configuration via specialArgs.
let
  # Both data-hoarder machines run the same service stack, so they share
  # one service-endpoint registry and only differ in host identity.
  dataHoarderServices = import ./data-hoarder;
in
{
  data-hoarder = dataHoarderServices // {
    hostName = "data-hoarder";
    wgAddr4 = "10.13.37.1";
    wireguardPublicKey = "WDvCObJ0WgCCZ0ORV2q4sdXblBd8pOPZBmeWr97yphY=";
    # The only host with a publicly reachable WireGuard endpoint.
    publicWireguardEndpoint = {
      host = "endpoint.tlm.solutions";
      port = 51820;
    };
  };

  staging-data-hoarder = dataHoarderServices // {
    hostName = "staging-data-hoarder";
    wgAddr4 = "10.13.37.5";
    wireguardPublicKey = "48hc7DVnUh2DHYhrxrNtNzj05MRecJO52j2niPImvkU=";
    publicWireguardEndpoint = null;
  };

  # Stop boxes keep a per-id sub-registry of their own.
  traffic-stop-box = import ./traffic-stop-box;

  notice-me-senpai = {
    hostName = "notice-me-senpai";
    wgAddr4 = "10.13.37.200";
    wireguardPublicKey = "z2E9TjL9nn0uuLmyQexqddE6g8peB5ENyf0LxpMolD4=";
    publicWireguardEndpoint = null;
    port-loki = 3100;
  };

  uranus = {
    hostName = "uranus";
    wgAddr4 = "10.13.37.9";
    wireguardPublicKey = "KwCG5CWPdNmrjEOYJYD2w0yhzoWpYHrjGbstdT5+pFk=";
    publicWireguardEndpoint = null;
  };

  tram-borzoi = {
    hostName = "tram-borzoi";
    wgAddr4 = "10.13.37.8";
    wireguardPublicKey = "wCW+r5kAaIarvZUWf4KsJNetyHobP0nNy5QOhqmsCCs=";
    publicWireguardEndpoint = null;
  };
}

View File

@ -0,0 +1,98 @@
# Per-box registry for the traffic-stop-box fleet, keyed by box id (string).
# Consumed by the flake's generate_system, which reads hostName, arch,
# monitoring, wgAddr4 and telegramDecoderConfig from the selected entry.
# gnuradio = receiver tuning (center frequency / offset in Hz, SDR device
# string, RF/IF/BB gain stages).
{
  # Barkhausen Bau
  "0" = {
    wireguardPublicKey = "qyStvzZdoqcjJJQckw4ZwvsQUa+8TBWtnsRxURqanno=";
    hostName = "traffic-stop-box-0";
    gnuradio = { frequency = 170790000; offset = 20000; device = ""; RF = 0; IF = 0; BB = 32; }; # dresden - barkhausen
    wgAddr4 = "10.13.37.100";
    arch = "x86_64-linux";
    monitoring = true;
    telegramDecoderConfig = ./config_0.json;
    publicWireguardEndpoint = null;
  };
  # Zentralwerk
  "1" = {
    wireguardPublicKey = "dOPobdvfphx0EHmU7dd5ihslFzZi17XgRDQLMIUYa1w=";
    hostName = "traffic-stop-box-1";
    gnuradio = { frequency = 170790000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; }; # dresden - zentralwerk
    wgAddr4 = "10.13.37.101";
    arch = "x86_64-linux";
    monitoring = true;
    telegramDecoderConfig = ./config_1.json;
    publicWireguardEndpoint = null;
  };
  # NOTE(review): boxes "2", "3", "6", "7" and "10" define no `arch` or
  # `monitoring` attrs, but the flake's generate_system reads both
  # (myRegistry.arch / myRegistry.monitoring) — evaluation will fail if one
  # of these ids is instantiated. Confirm they are intentionally dormant or
  # add the missing attributes.
  "2" = {
    wireguardPublicKey = "4TUQCToGNhjsCgV9elYE/91Vd/RvMgvMXtF/1Dzlvxo=";
    hostName = "traffic-stop-box-2";
    gnuradio = { frequency = 153850000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; }; # chemnitz
    wgAddr4 = "10.13.37.102";
    telegramDecoderConfig = ./config_2.json;
    publicWireguardEndpoint = null;
  };
  "3" = {
    wireguardPublicKey = "w3AT3EahW1sCK8ZsR7sDTcQj1McXYeWx7fnfQFA7i3o=";
    hostName = "traffic-stop-box-3";
    gnuradio = { frequency = 170795000; offset = 19400; device = ""; RF = 14; IF = 32; BB = 42; }; # dresden unused
    wgAddr4 = "10.13.37.103";
    telegramDecoderConfig = ./config_3.json;
    publicWireguardEndpoint = null;
  };
  # Wundstr. 9
  "4" = {
    wireguardPublicKey = "B0wPH0jUxaatRncHMkgDEQ+DzvlbTBrVJY4etxqQgG8=";
    hostName = "traffic-stop-box-4";
    gnuradio = { frequency = 170790000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; }; # dresden Wundstr. 9
    wgAddr4 = "10.13.37.104";
    arch = "x86_64-linux";
    monitoring = true;
    telegramDecoderConfig = ./config_4.json;
    publicWireguardEndpoint = null;
  };
  # number 5 is missing
  "6" = {
    wireguardPublicKey = "NuLDNmxuHHzDXJSIOPSoihEhLWjARRtavuQvWirNR2I=";
    hostName = "traffic-stop-box-6";
    gnuradio = { frequency = 150827500; offset = 19550; device = ""; RF = 14; IF = 32; BB = 42; }; # warpzone münster
    wgAddr4 = "10.13.37.106";
    telegramDecoderConfig = ./config_6.json;
    publicWireguardEndpoint = null;
  };
  "7" = {
    wireguardPublicKey = "sMsdY7dSjlYeIFMqjkh4pJ/ftAYXlyRuxDGbdnGLpEQ=";
    hostName = "traffic-stop-box-7";
    gnuradio = { frequency = 150827500; offset = 19550; device = ""; RF = 14; IF = 32; BB = 42; }; # drehturm aachen
    wgAddr4 = "10.13.37.107";
    telegramDecoderConfig = ./config_7.json;
    publicWireguardEndpoint = null;
  };
  # Hannover Bredero Hochhaus City
  "8" = {
    wireguardPublicKey = "dL9JGsBhaTOmXgGEH/N/GCHbQgVHEjBvIMaRtCsHBHw=";
    hostName = "traffic-stop-box-8";
    gnuradio = { frequency = 150890000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; }; # Hannover Bredero Hochhaus City
    wgAddr4 = "10.13.37.108";
    arch = "aarch64-linux";
    monitoring = false;
    telegramDecoderConfig = ./config_8.json;
    publicWireguardEndpoint = null;
  };
  # Hannover Bredero Hochhaus Wider Area
  "9" = {
    wireguardPublicKey = "j2hGr2rVv7T9kJE15c2IFWjmk0dXuJPev2BXiHZUKk8=";
    hostName = "traffic-stop-box-9";
    gnuradio = { frequency = 152830000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; }; # Hannover Bredero Hochaus Umland
    wgAddr4 = "10.13.37.109";
    arch = "aarch64-linux";
    monitoring = false;
    telegramDecoderConfig = ./config_9.json;
    publicWireguardEndpoint = null;
  };
  "10" = {
    # NOTE(review): this wireguardPublicKey is byte-identical to box "8"'s.
    # Two WireGuard peers must not share a public key (the server routes by
    # key, so one of the two boxes would be unreachable) — presumably a
    # copy-paste slip or reused hardware; confirm and regenerate if needed.
    wireguardPublicKey = "dL9JGsBhaTOmXgGEH/N/GCHbQgVHEjBvIMaRtCsHBHw=";
    hostName = "traffic-stop-box-10";
    gnuradio = { frequency = 153850000; offset = 20000; device = ""; RF = 14; IF = 32; BB = 42; }; # CLT
    wgAddr4 = "10.13.37.110";
    telegramDecoderConfig = ./config_10.json;
    publicWireguardEndpoint = null;
  };
}