Compare commits

...

8 commits

22 changed files with 1459 additions and 69 deletions

View file

@ -27,9 +27,10 @@
ai:
openrouter_api_key: "file://${config.sops.secrets.ai-mailer-openrouter-key.path}"
model: "openai/gpt-5-mini"
model: "openai/gpt-5.4-mini"
temperature: 0.3
max_tokens: 200000
default_language: German
context:
urls:

View file

@ -47,7 +47,7 @@
endpoint = "5.9.131.17:51821";
publicKey = "T7jPGSapSudtKyWwi2nu+2hjjse96I4U3lccRHZWd2s=";
presharedKeyFile = config.sops.secrets.wg_epicenter_works_psk.path;
allowedIPs = [ "10.14.1.0/24" "10.14.2.0/24" "10.14.11.0/24" "10.14.40.0/24" "10.25.0.0/24" "10.50.60.0/24" ];
allowedIPs = [ "10.14.1.0/24" "10.14.2.0/24" "10.14.11.0/24" "10.14.40.0/24" "10.25.0.0/24" "10.50.60.0/24" "10.60.60.0/24" ];
}
];
};

View file

@ -1,5 +1,45 @@
{ lib, pkgs, ... }:
{ lib, pkgs, ... }:
let
# Helper to run a local QEMU/KVM VM. Creates a 60G qcow2 disk under
# ~/epicenter-vm on first use, boots via OVMF (UEFI) with a
# GL-accelerated GTK display, and forwards host port 2222 to guest SSH
# port 22. Optional: --iso <file> attaches the ISO and boots from it.
epicenter-vm = pkgs.writeShellScriptBin "epicenter-vm" ''
set -euo pipefail
VM_DIR="$HOME/epicenter-vm"
DISK="$VM_DIR/disk.qcow2"
ISO=""
# Argument parsing: only --iso is recognised; anything else prints usage.
while [[ $# -gt 0 ]]; do
case "$1" in
--iso) ISO="$2"; shift 2 ;;
*) echo "Usage: epicenter-vm [--iso /path/to/file.iso]"; exit 1 ;;
esac
done
# Create disk if it doesn't exist
if [ ! -f "$DISK" ]; then
mkdir -p "$VM_DIR"
${pkgs.qemu}/bin/qemu-img create -f qcow2 "$DISK" 60G
fi
QEMU_ARGS=(
-enable-kvm
-m 4G
-smp 2
-drive "file=$DISK,format=qcow2,if=virtio"
-bios ${pkgs.OVMF.fd}/FV/OVMF.fd
-display gtk,gl=on
-device virtio-vga-gl
-device virtio-net-pci,netdev=net0
-netdev user,id=net0,hostfwd=tcp::2222-:22
)
# Attach ISO if provided
if [ -n "$ISO" ]; then
QEMU_ARGS+=(-cdrom "$ISO" -boot d)
fi
# QEMU_ARGS is expanded by bash at runtime; Nix interpolation is escaped.
exec ${pkgs.qemu}/bin/qemu-system-x86_64 "''${QEMU_ARGS[@]}"
'';
wrapperScript = pkgs.writeShellScriptBin "rustdesk-epicenter-wrapper" ''
# Grant epicenter user access to the Wayland socket
${pkgs.acl}/bin/setfacl -m u:epicenter:x "$XDG_RUNTIME_DIR"
@ -22,6 +62,7 @@ let
};
in {
environment.systemPackages = [
epicenter-vm
rustdeskEpicenterDesktopItem
];

View file

@ -1,5 +1,27 @@
{ config, lib, pkgs, ... }:
let
# Polls DDEV and rewrites dnsmasq's addn-hosts file with one
# "127.0.0.1 <project>.ddev.site" line per *running* project, reloading
# dnsmasq only when the set of running projects actually changed.
ddev-dns-update = pkgs.writeShellScriptBin "ddev-dns-update" ''
set -e
hosts_file="/var/lib/dnsmasq/ddev-hosts"
# Build the candidate file in a temp location; removed on any exit.
new_hosts=$(${pkgs.coreutils}/bin/mktemp)
trap '${pkgs.coreutils}/bin/rm -f "$new_hosts"' EXIT
# Get running DDEV project names
# (|| true: a failed/empty ddev listing just produces an empty hosts file)
running=$(${pkgs.ddev}/bin/ddev list --json-output 2>/dev/null \
| ${pkgs.jq}/bin/jq -r '.raw[]? | select(.status == "running") | .name // empty' 2>/dev/null) || true
# Build hosts entries
for name in $running; do
echo "127.0.0.1 $name.ddev.site" >> "$new_hosts"
done
# Only reload dnsmasq if content changed
if ! ${pkgs.diffutils}/bin/cmp -s "$new_hosts" "$hosts_file"; then
${pkgs.coreutils}/bin/cp "$new_hosts" "$hosts_file"
# Reload via sudo (covered by a NOPASSWD rule in this module);
# best-effort by design -- failures are deliberately ignored.
/run/wrappers/bin/sudo /run/current-system/systemd/bin/systemctl reload dnsmasq.service 2>/dev/null || true
fi
'';
in
{
# Enable systemd-resolved with split DNS for ddev.site
services.resolved = {
@ -14,18 +36,66 @@
# Integrate NetworkManager with systemd-resolved
networking.networkmanager.dns = "systemd-resolved";
# Local dnsmasq for .ddev.site resolution only (port 5353)
# Local dnsmasq for .ddev.site resolution (port 5353)
# Dynamic hosts file resolves running DDEV projects to 127.0.0.1;
# unmatched .ddev.site queries forward to VPN DNS (returns dev server IP)
services.dnsmasq = {
enable = true;
resolveLocalQueries = false;
settings = {
port = 5353;
listen-address = "127.0.0.1";
bind-interfaces = true;
no-resolv = true;
address = "/.ddev.site/127.0.0.1";
server = [ "/ddev.site/10.42.97.1" ];
addn-hosts = "/var/lib/dnsmasq/ddev-hosts";
};
};
# Ensure hosts file exists before dnsmasq starts (owned by dominik so the
# user-level service can write it)
systemd.tmpfiles.rules = [
"d /var/lib/dnsmasq 0755 root root -"
"f /var/lib/dnsmasq/ddev-hosts 0644 dominik root -"
];
# Poll running DDEV projects and update dnsmasq hosts
# Runs as dominik because ddev needs user-level Docker access
systemd.services.ddev-dns-update = {
description = "Update dnsmasq hosts for running DDEV projects";
after = [ "dnsmasq.service" "docker.service" ];
serviceConfig = {
Type = "oneshot";
User = "dominik";
ExecStart = "${ddev-dns-update}/bin/ddev-dns-update";
};
};
systemd.timers.ddev-dns-update = {
wantedBy = [ "timers.target" ];
timerConfig = {
OnBootSec = "30s";
OnUnitActiveSec = "10s";
};
};
environment.systemPackages = [ ddev-dns-update ];
security.sudo.extraRules = [
{
users = [ "dominik" ];
commands = [
{
command = "${ddev-dns-update}/bin/ddev-dns-update";
options = [ "NOPASSWD" ];
}
{
command = "/run/current-system/systemd/bin/systemctl reload dnsmasq.service";
options = [ "NOPASSWD" ];
}
];
}
];
# WireGuard VPN configuration
networking.wireguard.interfaces = {
wg0 = {

View file

@ -8,7 +8,7 @@ let
-v 'ipp://brn30055c566237.cloonar.multimedia/ipp/print' \
-m 'everywhere'
lpadmin -d 'epicenter.works'
lpadmin -d 'Cloonar'
'';
};

View file

@ -724,7 +724,7 @@ in
};
identityFile = "~/.ssh/epicenter.id_rsa";
};
"*.cloonar.com" = {
"*.cloonar.com !mac.cloonar.com" = {
user = "root";
};
"*.cloonar.smart" = {

View file

@ -20,6 +20,7 @@
./modules/blackbox-exporter.nix
./modules/updns.nix
./modules/atticd.nix
./modules/supabase
./utils/modules/autoupgrade.nix
./utils/modules/promtail

View file

@ -0,0 +1,431 @@
{ config, lib, pkgs, ... }:
let
# Kong's custom entrypoint script; mounted into the kong container and
# set as its entrypoint below. Templates kong.yml at container start.
kongEntrypoint = pkgs.writeTextFile {
name = "kong-entrypoint.sh";
executable = true;
text = builtins.readFile ./kong-entrypoint.sh;
};
# Renders /run/supabase/*.env from the SOPS secret; executed by the
# supabase-env-generate oneshot unit defined below.
envGenerateScript = pkgs.writeShellScript "supabase-env-generate"
(builtins.readFile ./env-generate.sh);
# Common extra options for all containers to join the supabase network
supabaseNet = [ "--network=supabase-net" ];
in
{
# --- SOPS secret ---
# Single encrypted env file holding all Supabase secrets; split into
# per-container env files by the supabase-env-generate unit below.
sops.secrets.supabase-env = { };
# --- Persistent data directories ---
# Postgres data dir is 0700; the others are bind-mounted into containers.
systemd.tmpfiles.rules = [
"d /var/lib/supabase/db/data 0700 root root -"
"d /var/lib/supabase/storage 0755 root root -"
"d /var/lib/supabase/functions 0755 root root -"
"d /var/lib/supabase/snippets 0755 root root -"
];
# --- Systemd services: network, env generation, and container ordering ---
systemd.services =
  let
    # Every container below joins supabase-net and reads its env file
    # from /run/supabase, so each podman-<name> unit must be ordered
    # after (and require) the network- and env-setup oneshots.
    containerNames = [
      "supabase-db"
      "supabase-analytics"
      "supabase-auth"
      "supabase-rest"
      "supabase-realtime"
      "supabase-storage"
      "supabase-imgproxy"
      "supabase-meta"
      "supabase-studio"
      "supabase-kong"
      "supabase-vector"
      "supabase-pooler"
      "supabase-functions"
    ];
    # Attach the shared ordering/requirement edges to one container unit.
    mkContainerDeps = name: {
      "podman-${name}" = {
        after = [ "init-supabase-network.service" "supabase-env-generate.service" ];
        requires = [ "init-supabase-network.service" "supabase-env-generate.service" ];
      };
    };
  in
  lib.mkMerge (map mkContainerDeps containerNames ++ [
    {
      init-supabase-network = {
        description = "Create supabase-net Podman network";
        wantedBy = [ "multi-user.target" ];
        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
          # Idempotent: create the network only when it does not exist.
          # The previous '-' ExecStart prefix told systemd to ignore
          # *every* failure of `podman network create` -- including real
          # podman errors -- not just "network already exists". With the
          # guard, genuine failures now fail the unit (and, via
          # `requires`, keep the containers from starting half-broken).
          ExecStart = pkgs.writeShellScript "init-supabase-network" ''
            ${pkgs.podman}/bin/podman network exists supabase-net || \
              ${pkgs.podman}/bin/podman network create supabase-net
          '';
        };
      };
      supabase-env-generate = {
        description = "Generate Supabase per-container env files from SOPS secrets";
        wantedBy = [ "multi-user.target" ];
        # env-generate.sh calls bare `jq`; provide it on PATH.
        path = [ pkgs.jq ];
        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
          # Argument: path to the decrypted SOPS env file.
          ExecStart = "${envGenerateScript} ${config.sops.secrets.supabase-env.path}";
        };
      };
    }
  ]);
# --- Containers ---
virtualisation.oci-containers.containers = {
# 1. PostgreSQL
supabase-db = {
image = "supabase/postgres:15.8.1.085";
environment = {
POSTGRES_HOST = "/var/run/postgresql";
PGPORT = "5432";
POSTGRES_PORT = "5432";
PGDATABASE = "postgres";
POSTGRES_DB = "postgres";
JWT_EXP = "3600";
};
environmentFiles = [ "/run/supabase/db.env" ];
volumes = [
"/var/lib/supabase/db/data:/var/lib/postgresql/data"
"${./sql/_supabase.sql}:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:ro"
"${./sql/realtime.sql}:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:ro"
"${./sql/logs.sql}:/docker-entrypoint-initdb.d/migrations/99-logs.sql:ro"
"${./sql/pooler.sql}:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:ro"
"${./sql/webhooks.sql}:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:ro"
"${./sql/roles.sql}:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:ro"
"${./sql/jwt.sql}:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:ro"
"supabase-db-config:/etc/postgresql-custom"
];
cmd = [
"postgres"
"-c" "config_file=/etc/postgresql/postgresql.conf"
"-c" "log_min_messages=fatal"
];
extraOptions = supabaseNet ++ [
"--network-alias=db"
"--shm-size=2g"
];
};
# 2. Analytics (Logflare)
supabase-analytics = {
image = "supabase/logflare:1.31.2";
dependsOn = [ "supabase-db" ];
environment = {
LOGFLARE_NODE_HOST = "127.0.0.1";
DB_USERNAME = "supabase_admin";
DB_DATABASE = "_supabase";
DB_HOSTNAME = "db";
DB_PORT = "5432";
DB_SCHEMA = "_analytics";
LOGFLARE_SINGLE_TENANT = "true";
LOGFLARE_SUPABASE_MODE = "true";
POSTGRES_BACKEND_SCHEMA = "_analytics";
LOGFLARE_FEATURE_FLAG_OVERRIDE = "multibackend=true";
};
environmentFiles = [ "/run/supabase/analytics.env" ];
extraOptions = supabaseNet ++ [
"--network-alias=analytics"
];
};
# 3. Auth (GoTrue)
supabase-auth = {
image = "supabase/gotrue:v2.186.0";
dependsOn = [ "supabase-db" "supabase-analytics" ];
environment = {
GOTRUE_API_HOST = "0.0.0.0";
GOTRUE_API_PORT = "9999";
API_EXTERNAL_URL = "https://supabase.cloonar.com";
GOTRUE_DB_DRIVER = "postgres";
GOTRUE_SITE_URL = "https://supabase.cloonar.com";
GOTRUE_URI_ALLOW_LIST = "";
GOTRUE_DISABLE_SIGNUP = "false";
GOTRUE_JWT_ADMIN_ROLES = "service_role";
GOTRUE_JWT_AUD = "authenticated";
GOTRUE_JWT_DEFAULT_GROUP_NAME = "authenticated";
GOTRUE_JWT_EXP = "3600";
GOTRUE_EXTERNAL_EMAIL_ENABLED = "true";
GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED = "false";
GOTRUE_MAILER_AUTOCONFIRM = "true";
GOTRUE_SMTP_ADMIN_EMAIL = "admin@cloonar.com";
GOTRUE_SMTP_HOST = "supabase-mail";
GOTRUE_SMTP_PORT = "2500";
GOTRUE_SMTP_USER = "";
GOTRUE_SMTP_PASS = "";
GOTRUE_SMTP_SENDER_NAME = "Supabase";
GOTRUE_MAILER_URLPATHS_INVITE = "/auth/v1/verify";
GOTRUE_MAILER_URLPATHS_CONFIRMATION = "/auth/v1/verify";
GOTRUE_MAILER_URLPATHS_RECOVERY = "/auth/v1/verify";
GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE = "/auth/v1/verify";
GOTRUE_EXTERNAL_PHONE_ENABLED = "false";
GOTRUE_SMS_AUTOCONFIRM = "false";
};
environmentFiles = [ "/run/supabase/auth.env" ];
extraOptions = supabaseNet ++ [
"--network-alias=auth"
];
};
# 4. REST (PostgREST)
supabase-rest = {
image = "postgrest/postgrest:v14.6";
dependsOn = [ "supabase-db" ];
environment = {
PGRST_DB_SCHEMAS = "public,storage,graphql_public";
PGRST_DB_MAX_ROWS = "1000";
PGRST_DB_EXTRA_SEARCH_PATH = "public";
PGRST_DB_ANON_ROLE = "anon";
PGRST_DB_USE_LEGACY_GUCS = "false";
PGRST_APP_SETTINGS_JWT_EXP = "3600";
};
environmentFiles = [ "/run/supabase/rest.env" ];
cmd = [ "postgrest" ];
extraOptions = supabaseNet ++ [
"--network-alias=rest"
];
};
# 5. Realtime
supabase-realtime = {
image = "supabase/realtime:v2.76.5";
dependsOn = [ "supabase-db" ];
environment = {
PORT = "4000";
DB_HOST = "db";
DB_PORT = "5432";
DB_USER = "supabase_admin";
DB_NAME = "postgres";
DB_AFTER_CONNECT_QUERY = "SET search_path TO _realtime";
DB_ENC_KEY = "supabaserealtime";
ERL_AFLAGS = "-proto_dist inet_tcp";
DNS_NODES = "''";
RLIMIT_NOFILE = "10000";
APP_NAME = "realtime";
SEED_SELF_HOST = "true";
RUN_JANITOR = "true";
DISABLE_HEALTHCHECK_LOGGING = "true";
};
environmentFiles = [ "/run/supabase/realtime.env" ];
extraOptions = supabaseNet ++ [
# Hostname must be realtime-dev.supabase-realtime for tenant ID parsing
"--hostname=realtime-dev.supabase-realtime"
"--network-alias=realtime-dev.supabase-realtime"
];
};
# 6. Storage
supabase-storage = {
image = "supabase/storage-api:v1.44.2";
dependsOn = [ "supabase-db" "supabase-rest" "supabase-imgproxy" ];
environment = {
POSTGREST_URL = "http://rest:3000";
STORAGE_PUBLIC_URL = "https://supabase.cloonar.com";
REQUEST_ALLOW_X_FORWARDED_PATH = "true";
FILE_SIZE_LIMIT = "52428800";
STORAGE_BACKEND = "file";
GLOBAL_S3_BUCKET = "stub";
FILE_STORAGE_BACKEND_PATH = "/var/lib/storage";
TENANT_ID = "stub";
REGION = "stub";
ENABLE_IMAGE_TRANSFORMATION = "true";
IMGPROXY_URL = "http://imgproxy:5001";
};
environmentFiles = [ "/run/supabase/storage.env" ];
volumes = [
"/var/lib/supabase/storage:/var/lib/storage"
];
extraOptions = supabaseNet ++ [
"--network-alias=storage"
];
};
# 7. Imgproxy
supabase-imgproxy = {
image = "darthsim/imgproxy:v3.30.1";
environment = {
IMGPROXY_BIND = ":5001";
IMGPROXY_LOCAL_FILESYSTEM_ROOT = "/";
IMGPROXY_USE_ETAG = "true";
IMGPROXY_AUTO_WEBP = "true";
IMGPROXY_MAX_SRC_RESOLUTION = "16.8";
};
volumes = [
"/var/lib/supabase/storage:/var/lib/storage"
];
extraOptions = supabaseNet ++ [
"--network-alias=imgproxy"
];
};
# 8. Meta (pg-meta)
supabase-meta = {
image = "supabase/postgres-meta:v0.95.2";
dependsOn = [ "supabase-db" ];
environment = {
PG_META_PORT = "8080";
PG_META_DB_HOST = "db";
PG_META_DB_PORT = "5432";
PG_META_DB_NAME = "postgres";
PG_META_DB_USER = "supabase_admin";
};
environmentFiles = [ "/run/supabase/meta.env" ];
extraOptions = supabaseNet ++ [
"--network-alias=meta"
];
};
# 9. Studio
supabase-studio = {
image = "supabase/studio:2026.03.16-sha-5528817";
dependsOn = [ "supabase-analytics" ];
environment = {
HOSTNAME = "::";
STUDIO_PG_META_URL = "http://meta:8080";
POSTGRES_PORT = "5432";
POSTGRES_HOST = "db";
POSTGRES_DB = "postgres";
PGRST_DB_SCHEMAS = "public,storage,graphql_public";
PGRST_DB_MAX_ROWS = "1000";
PGRST_DB_EXTRA_SEARCH_PATH = "public";
DEFAULT_ORGANIZATION_NAME = "Default Organization";
DEFAULT_PROJECT_NAME = "Default Project";
SUPABASE_URL = "http://kong:8000";
SUPABASE_PUBLIC_URL = "https://supabase.cloonar.com";
NEXT_PUBLIC_ENABLE_LOGS = "true";
NEXT_ANALYTICS_BACKEND_PROVIDER = "postgres";
LOGFLARE_URL = "http://analytics:4000";
SNIPPETS_MANAGEMENT_FOLDER = "/app/snippets";
EDGE_FUNCTIONS_MANAGEMENT_FOLDER = "/app/edge-functions";
};
environmentFiles = [ "/run/supabase/studio.env" ];
volumes = [
"/var/lib/supabase/snippets:/app/snippets"
"/var/lib/supabase/functions:/app/edge-functions"
];
extraOptions = supabaseNet ++ [
"--network-alias=studio"
];
};
# 10. Kong (API Gateway)
supabase-kong = {
image = "kong/kong:3.9.1";
dependsOn = [ "supabase-studio" ];
environment = {
KONG_DATABASE = "off";
KONG_DECLARATIVE_CONFIG = "/usr/local/kong/kong.yml";
KONG_DNS_ORDER = "LAST,A,CNAME";
KONG_DNS_NOT_FOUND_TTL = "1";
KONG_PLUGINS = "request-transformer,cors,key-auth,acl,basic-auth,request-termination,ip-restriction,post-function";
KONG_NGINX_PROXY_PROXY_BUFFER_SIZE = "160k";
KONG_NGINX_PROXY_PROXY_BUFFERS = "64 160k";
KONG_PROXY_ACCESS_LOG = "/dev/stdout combined";
};
environmentFiles = [ "/run/supabase/kong.env" ];
ports = [
"127.0.0.1:8000:8000"
"127.0.0.1:8443:8443"
];
volumes = [
"${./kong.yml}:/home/kong/temp.yml:ro"
"${kongEntrypoint}:/home/kong/kong-entrypoint.sh:ro"
];
entrypoint = "/home/kong/kong-entrypoint.sh";
extraOptions = supabaseNet ++ [
"--network-alias=kong"
];
};
# 11. Vector (log collection)
# Ships container logs to Logflare (the analytics container). Pipeline
# config is the bundled vector.yml; the only secret (the Logflare token)
# arrives via the generated env file.
supabase-vector = {
image = "timberio/vector:0.53.0-alpine";
environment = { };
environmentFiles = [ "/run/supabase/vector.env" ];
volumes = [
"${./vector.yml}:/etc/vector/vector.yml:ro"
# NOTE(review): vector.yml uses a docker_logs source, but this module
# runs its containers with podman (see the podman-* units above).
# /var/run/docker.sock exists only if a docker-compatible podman
# socket is provided -- confirm (e.g. virtualisation.podman
# dockerSocket/dockerCompat settings elsewhere in the config).
"/var/run/docker.sock:/var/run/docker.sock:ro"
];
cmd = [ "--config" "/etc/vector/vector.yml" ];
extraOptions = supabaseNet ++ [
"--network-alias=vector"
# Disables SELinux label separation so the mounted socket is readable.
"--security-opt=label=disable"
];
};
# 12. Pooler (Supavisor)
supabase-pooler = {
image = "supabase/supavisor:2.7.4";
dependsOn = [ "supabase-db" ];
environment = {
PORT = "4000";
CLUSTER_POSTGRES = "true";
REGION = "local";
ERL_AFLAGS = "-proto_dist inet_tcp";
POOLER_POOL_MODE = "transaction";
POSTGRES_PORT = "5432";
POSTGRES_DB = "postgres";
POOLER_TENANT_ID = "default-tenant";
POOLER_DEFAULT_POOL_SIZE = "20";
POOLER_MAX_CLIENT_CONN = "100";
DB_POOL_SIZE = "10";
};
environmentFiles = [ "/run/supabase/pooler.env" ];
volumes = [
"${./pooler.exs}:/etc/pooler/pooler.exs:ro"
];
cmd = [
"/bin/sh" "-c"
"/app/bin/migrate && /app/bin/supavisor eval \"$(cat /etc/pooler/pooler.exs)\" && /app/bin/server"
];
extraOptions = supabaseNet ++ [
"--network-alias=pooler"
];
};
# 13. Edge Functions
supabase-functions = {
image = "supabase/edge-runtime:v1.71.2";
dependsOn = [ "supabase-kong" ];
environment = {
SUPABASE_URL = "http://kong:8000";
SUPABASE_PUBLIC_URL = "https://supabase.cloonar.com";
VERIFY_JWT = "false";
};
environmentFiles = [ "/run/supabase/functions.env" ];
volumes = [
"/var/lib/supabase/functions:/home/deno/functions"
"supabase-deno-cache:/root/.cache/deno"
];
cmd = [ "start" "--main-service" "/home/deno/functions/main" ];
extraOptions = supabaseNet ++ [
"--network-alias=functions"
];
};
};
# --- Nginx reverse proxy ---
# TLS termination for the stack: ACME certificate, proxying to Kong on
# loopback :8000 (see the supabase-kong ports above).
services.nginx.virtualHosts."supabase.cloonar.com" = {
forceSSL = true;
enableACME = true;
acmeRoot = null;
locations."/" = {
proxyPass = "http://127.0.0.1:8000";
# Realtime clients connect via websockets under /realtime/v1/.
proxyWebsockets = true;
# 24h read/send timeouts keep long-lived realtime connections alive;
# 50M matches storage's FILE_SIZE_LIMIT (52428800 bytes) above.
extraConfig = ''
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 86400s;
proxy_send_timeout 86400s;
client_max_body_size 50M;
'';
};
};
}

View file

@ -0,0 +1,92 @@
# Renders per-container env files under /run/supabase from the single
# SOPS-decrypted env file passed as $1 (invoked by the
# supabase-env-generate oneshot). Each container only receives the
# secrets it actually needs.
set -euo pipefail
# Secrets: generated files must not be world-readable.
umask 077
mkdir -p /run/supabase
# Export every KEY=VALUE from the master env file into the environment.
set -a
source "$1"
set +a
# URL-encode password for use in connection strings
PG_PASS_ENCODED=$(printf '%s' "$POSTGRES_PASSWORD" | jq -sRr @uri)
cat > /run/supabase/db.env <<EOF
POSTGRES_PASSWORD=$POSTGRES_PASSWORD
PGPASSWORD=$POSTGRES_PASSWORD
JWT_SECRET=$JWT_SECRET
EOF
cat > /run/supabase/analytics.env <<EOF
DB_PASSWORD=$POSTGRES_PASSWORD
LOGFLARE_PUBLIC_ACCESS_TOKEN=$LOGFLARE_PUBLIC_ACCESS_TOKEN
LOGFLARE_PRIVATE_ACCESS_TOKEN=$LOGFLARE_PRIVATE_ACCESS_TOKEN
POSTGRES_BACKEND_URL=postgresql://supabase_admin:$PG_PASS_ENCODED@db:5432/_supabase
EOF
cat > /run/supabase/auth.env <<EOF
GOTRUE_JWT_SECRET=$JWT_SECRET
GOTRUE_DB_DATABASE_URL=postgres://supabase_auth_admin:$PG_PASS_ENCODED@db:5432/postgres
EOF
cat > /run/supabase/rest.env <<EOF
PGRST_JWT_SECRET=$JWT_SECRET
PGRST_APP_SETTINGS_JWT_SECRET=$JWT_SECRET
PGRST_DB_URI=postgres://authenticator:$PG_PASS_ENCODED@db:5432/postgres
EOF
cat > /run/supabase/realtime.env <<EOF
DB_PASSWORD=$POSTGRES_PASSWORD
API_JWT_SECRET=$JWT_SECRET
SECRET_KEY_BASE=$SECRET_KEY_BASE
METRICS_JWT_SECRET=$JWT_SECRET
EOF
cat > /run/supabase/storage.env <<EOF
ANON_KEY=$ANON_KEY
SERVICE_KEY=$SERVICE_ROLE_KEY
AUTH_JWT_SECRET=$JWT_SECRET
DATABASE_URL=postgres://supabase_storage_admin:$PG_PASS_ENCODED@db:5432/postgres
S3_PROTOCOL_ACCESS_KEY_ID=$S3_PROTOCOL_ACCESS_KEY_ID
S3_PROTOCOL_ACCESS_KEY_SECRET=$S3_PROTOCOL_ACCESS_KEY_SECRET
EOF
cat > /run/supabase/meta.env <<EOF
PG_META_DB_PASSWORD=$POSTGRES_PASSWORD
CRYPTO_KEY=$PG_META_CRYPTO_KEY
EOF
# NOTE(review): studio receives the URL-encoded password while db/meta
# receive the raw one -- correct only if studio builds connection URLs
# from POSTGRES_PASSWORD; confirm against the studio image.
cat > /run/supabase/studio.env <<EOF
POSTGRES_PASSWORD=$PG_PASS_ENCODED
PG_META_CRYPTO_KEY=$PG_META_CRYPTO_KEY
SUPABASE_ANON_KEY=$ANON_KEY
SUPABASE_SERVICE_KEY=$SERVICE_ROLE_KEY
AUTH_JWT_SECRET=$JWT_SECRET
LOGFLARE_API_KEY=$LOGFLARE_PUBLIC_ACCESS_TOKEN
LOGFLARE_PUBLIC_ACCESS_TOKEN=$LOGFLARE_PUBLIC_ACCESS_TOKEN
LOGFLARE_PRIVATE_ACCESS_TOKEN=$LOGFLARE_PRIVATE_ACCESS_TOKEN
EOF
cat > /run/supabase/kong.env <<EOF
SUPABASE_ANON_KEY=$ANON_KEY
SUPABASE_SERVICE_KEY=$SERVICE_ROLE_KEY
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=$DASHBOARD_PASSWORD
EOF
cat > /run/supabase/vector.env <<EOF
LOGFLARE_PUBLIC_ACCESS_TOKEN=$LOGFLARE_PUBLIC_ACCESS_TOKEN
EOF
cat > /run/supabase/pooler.env <<EOF
POSTGRES_PASSWORD=$POSTGRES_PASSWORD
DATABASE_URL=ecto://supabase_admin:$PG_PASS_ENCODED@db:5432/_supabase
SECRET_KEY_BASE=$SECRET_KEY_BASE
VAULT_ENC_KEY=$VAULT_ENC_KEY
API_JWT_SECRET=$JWT_SECRET
METRICS_JWT_SECRET=$JWT_SECRET
EOF
cat > /run/supabase/functions.env <<EOF
JWT_SECRET=$JWT_SECRET
SUPABASE_ANON_KEY=$ANON_KEY
SUPABASE_SERVICE_ROLE_KEY=$SERVICE_ROLE_KEY
EOF

View file

@ -0,0 +1,25 @@
#!/bin/bash
# Kong container entrypoint: renders the declarative config from
# /home/kong/temp.yml (mounted read-only) by substituting $VARS from the
# environment, then hands off to the stock Kong entrypoint.
# Legacy API keys, not sb_ API keys -> pass apikey through unchanged
export LUA_AUTH_EXPR="\$((headers.authorization ~= nil and headers.authorization:sub(1, 10) ~= 'Bearer sb_' and headers.authorization) or headers.apikey)"
export LUA_RT_WS_EXPR="\$(query_params.apikey)"
# Substitute environment variables in the Kong declarative config.
# $NAME tokens with no matching environment variable are kept literally
# (the awk else-branch copies the token through unchanged).
awk '{
result = ""
rest = $0
while (match(rest, /\$[A-Za-z_][A-Za-z_0-9]*/)) {
varname = substr(rest, RSTART + 1, RLENGTH - 1)
if (varname in ENVIRON) {
result = result substr(rest, 1, RSTART - 1) ENVIRON[varname]
} else {
result = result substr(rest, 1, RSTART + RLENGTH - 1)
}
rest = substr(rest, RSTART + RLENGTH)
}
print result rest
}' /home/kong/temp.yml > "$KONG_DECLARATIVE_CONFIG"
# Remove empty key-auth credentials (unconfigured opaque keys)
sed -i '/^[[:space:]]*- key:[[:space:]]*$/d' "$KONG_DECLARATIVE_CONFIG"
exec /entrypoint.sh kong docker-start

View file

@ -0,0 +1,265 @@
_format_version: '2.1'
_transform: true
consumers:
- username: DASHBOARD
- username: anon
keyauth_credentials:
- key: $SUPABASE_ANON_KEY
- username: service_role
keyauth_credentials:
- key: $SUPABASE_SERVICE_KEY
acls:
- consumer: anon
group: anon
- consumer: service_role
group: admin
basicauth_credentials:
- consumer: DASHBOARD
username: '$DASHBOARD_USERNAME'
password: '$DASHBOARD_PASSWORD'
services:
- name: auth-v1-open
url: http://auth:9999/verify
routes:
- name: auth-v1-open
strip_path: true
paths:
- /auth/v1/verify
plugins:
- name: cors
- name: auth-v1-open-callback
url: http://auth:9999/callback
routes:
- name: auth-v1-open-callback
strip_path: true
paths:
- /auth/v1/callback
plugins:
- name: cors
- name: auth-v1-open-authorize
url: http://auth:9999/authorize
routes:
- name: auth-v1-open-authorize
strip_path: true
paths:
- /auth/v1/authorize
plugins:
- name: cors
- name: auth-v1-open-jwks
url: http://auth:9999/.well-known/jwks.json
routes:
- name: auth-v1-open-jwks
strip_path: true
paths:
- /auth/v1/.well-known/jwks.json
plugins:
- name: cors
- name: auth-v1
url: http://auth:9999/
routes:
- name: auth-v1-all
strip_path: true
paths:
- /auth/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: request-transformer
config:
add:
headers:
- "Authorization: $LUA_AUTH_EXPR"
replace:
headers:
- "Authorization: $LUA_AUTH_EXPR"
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
- name: rest-v1
url: http://rest:3000/
routes:
- name: rest-v1-all
strip_path: true
paths:
- /rest/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: request-transformer
config:
add:
headers:
- "Authorization: $LUA_AUTH_EXPR"
replace:
headers:
- "Authorization: $LUA_AUTH_EXPR"
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
- name: graphql-v1
url: http://rest:3000/rpc/graphql
routes:
- name: graphql-v1-all
strip_path: true
paths:
- /graphql/v1
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: request-transformer
config:
add:
headers:
- "Content-Profile: graphql_public"
- "Authorization: $LUA_AUTH_EXPR"
replace:
headers:
- "Authorization: $LUA_AUTH_EXPR"
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
- name: realtime-v1-ws
url: http://realtime-dev.supabase-realtime:4000/socket
protocol: ws
routes:
- name: realtime-v1-ws
strip_path: true
paths:
- /realtime/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: request-transformer
config:
add:
headers:
- "x-api-key:$LUA_RT_WS_EXPR"
replace:
querystring:
- "apikey:$LUA_RT_WS_EXPR"
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
- name: realtime-v1-rest
url: http://realtime-dev.supabase-realtime:4000/api
protocol: http
routes:
- name: realtime-v1-rest
strip_path: true
paths:
- /realtime/v1/api
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: request-transformer
config:
add:
headers:
- "Authorization: $LUA_AUTH_EXPR"
replace:
headers:
- "Authorization: $LUA_AUTH_EXPR"
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
- name: storage-v1
url: http://storage:5000/
routes:
- name: storage-v1-all
strip_path: true
paths:
- /storage/v1/
plugins:
- name: cors
- name: request-transformer
config:
add:
headers:
- "Authorization: $LUA_AUTH_EXPR"
replace:
headers:
- "Authorization: $LUA_AUTH_EXPR"
- name: post-function
config:
access:
- |
local auth = kong.request.get_header("authorization")
if auth == nil or auth == "" or auth:find("^%s*$") then
kong.service.request.clear_header("authorization")
end
- name: functions-v1
url: http://functions:9000/
read_timeout: 150000
routes:
- name: functions-v1-all
strip_path: true
paths:
- /functions/v1/
plugins:
- name: cors
- name: well-known-oauth
url: http://auth:9999/.well-known/oauth-authorization-server
routes:
- name: well-known-oauth
strip_path: true
paths:
- /.well-known/oauth-authorization-server
plugins:
- name: cors
- name: meta
url: http://meta:8080/
routes:
- name: meta-all
strip_path: true
paths:
- /pg/
plugins:
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
- name: dashboard
url: http://studio:3000/
routes:
- name: dashboard-all
strip_path: true
paths:
- /
plugins:
- name: cors
- name: basic-auth
config:
hide_credentials: true

View file

@ -0,0 +1,30 @@
# Bootstraps the default Supavisor tenant at container start (evaluated
# via `supavisor eval` before the server starts; see the supabase-pooler
# container's cmd).
{:ok, _} = Application.ensure_all_started(:supavisor)
# Detect the backing Postgres server version; stored below as the
# tenant's default server_version parameter status.
# NOTE(review): the fallback branch yields nil, so the outer
# `{:ok, version} =` match raises MatchError on an unexpected query
# result -- presumably intentional fail-fast; confirm.
{:ok, version} =
case Supavisor.Repo.query!("select version()") do
%{rows: [[ver]]} -> Supavisor.Helpers.parse_pg_version(ver)
_ -> nil
end
# Tenant definition, filled from the container environment (see the
# supabase-pooler environment/env file).
params = %{
"external_id" => System.get_env("POOLER_TENANT_ID"),
"db_host" => "db",
"db_port" => System.get_env("POSTGRES_PORT"),
"db_database" => System.get_env("POSTGRES_DB"),
"require_user" => false,
"auth_query" => "SELECT * FROM pgbouncer.get_auth($1)",
"default_max_clients" => System.get_env("POOLER_MAX_CLIENT_CONN"),
"default_pool_size" => System.get_env("POOLER_DEFAULT_POOL_SIZE"),
"default_parameter_status" => %{"server_version" => version},
"users" => [%{
"db_user" => "pgbouncer",
"db_password" => System.get_env("POSTGRES_PASSWORD"),
"mode_type" => System.get_env("POOLER_POOL_MODE"),
"pool_size" => System.get_env("POOLER_DEFAULT_POOL_SIZE"),
"is_manager" => true
}]
}
# Idempotent: only create the tenant on first boot.
if !Supavisor.Tenants.get_tenant_by_external_id(params["external_id"]) do
{:ok, _} = Supavisor.Tenants.create_tenant(params)
end

View file

@ -0,0 +1,2 @@
-- Create the internal _supabase database (used by analytics and the
-- pooler), owned by the bootstrap user. `\set` pulls POSTGRES_USER from
-- the container environment via psql backtick expansion.
\set pguser `echo "$POSTGRES_USER"`
CREATE DATABASE _supabase WITH OWNER :pguser;

View file

@ -0,0 +1,4 @@
-- Expose the JWT secret/expiry as database-level settings so that
-- in-database code can read them via current_setting('app.settings.*').
-- Values come from the container environment via psql backtick expansion.
\set jwt_secret `echo "$JWT_SECRET"`
\set jwt_exp `echo "$JWT_EXP"`
ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret';
ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp';

View file

@ -0,0 +1,5 @@
-- Create the _analytics schema (Logflare backend) inside the _supabase
-- database, owned by the bootstrap user; then reconnect to postgres so
-- later init scripts run against the default database.
\set pguser `echo "$POSTGRES_USER"`
\c _supabase
create schema if not exists _analytics;
alter schema _analytics owner to :pguser;
\c postgres

View file

@ -0,0 +1,5 @@
-- Create the _supavisor schema (pooler state) inside the _supabase
-- database, owned by the bootstrap user; then reconnect to postgres.
\set pguser `echo "$POSTGRES_USER"`
\c _supabase
create schema if not exists _supavisor;
alter schema _supavisor owner to :pguser;
\c postgres

View file

@ -0,0 +1,3 @@
-- Create the _realtime schema (used by the realtime service, see its
-- DB_AFTER_CONNECT_QUERY) in the current database, owned by the
-- bootstrap user.
\set pguser `echo "$POSTGRES_USER"`
create schema if not exists _realtime;
alter schema _realtime owner to :pguser;

View file

@ -0,0 +1,6 @@
-- Set every internal service role's password to the shared
-- POSTGRES_PASSWORD from the container environment (matches the
-- connection strings generated by env-generate.sh).
\set pgpass `echo "$POSTGRES_PASSWORD"`
ALTER USER authenticator WITH PASSWORD :'pgpass';
ALTER USER pgbouncer WITH PASSWORD :'pgpass';
ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';

View file

@ -0,0 +1,153 @@
-- Database-webhooks bootstrap (upstream Supabase migration): installs
-- pg_net, the supabase_functions schema/tables, and the http_request()
-- trigger function used by webhooks. Statement order is significant.
BEGIN;
CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
-- Tracks which webhook migrations have been applied.
CREATE TABLE supabase_functions.migrations (
version text PRIMARY KEY,
inserted_at timestamptz NOT NULL DEFAULT NOW()
);
INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
-- Audit trail: one row per fired hook, linked to its pg_net request id.
CREATE TABLE supabase_functions.hooks (
id bigserial PRIMARY KEY,
hook_table_id integer NOT NULL,
hook_name text NOT NULL,
created_at timestamptz NOT NULL DEFAULT NOW(),
request_id bigint
);
CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
-- Generic trigger function: issues an async HTTP request via pg_net and
-- logs it into supabase_functions.hooks. Trigger arguments:
-- TG_ARGV[0]=url, [1]=method (GET|POST), [2]=headers jsonb,
-- [3]=params jsonb, [4]=timeout_ms.
CREATE FUNCTION supabase_functions.http_request()
RETURNS trigger
LANGUAGE plpgsql
AS $function$
DECLARE
request_id bigint;
payload jsonb;
url text := TG_ARGV[0]::text;
method text := TG_ARGV[1]::text;
headers jsonb DEFAULT '{}'::jsonb;
params jsonb DEFAULT '{}'::jsonb;
timeout_ms integer DEFAULT 1000;
BEGIN
IF url IS NULL OR url = 'null' THEN
RAISE EXCEPTION 'url argument is missing';
END IF;
IF method IS NULL OR method = 'null' THEN
RAISE EXCEPTION 'method argument is missing';
END IF;
IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
headers = '{"Content-Type": "application/json"}'::jsonb;
ELSE
headers = TG_ARGV[2]::jsonb;
END IF;
IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
params = '{}'::jsonb;
ELSE
params = TG_ARGV[3]::jsonb;
END IF;
IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
timeout_ms = 1000;
ELSE
timeout_ms = TG_ARGV[4]::integer;
END IF;
CASE
WHEN method = 'GET' THEN
SELECT http_get INTO request_id FROM net.http_get(url, params, headers, timeout_ms);
WHEN method = 'POST' THEN
payload = jsonb_build_object(
'old_record', OLD, 'record', NEW, 'type', TG_OP,
'table', TG_TABLE_NAME, 'schema', TG_TABLE_SCHEMA
);
SELECT http_post INTO request_id FROM net.http_post(url, payload, params, headers, timeout_ms);
ELSE
RAISE EXCEPTION 'method argument % is invalid', method;
END CASE;
INSERT INTO supabase_functions.hooks (hook_table_id, hook_name, request_id)
VALUES (TG_RELID, TG_NAME, request_id);
RETURN NEW;
END
$function$;
-- Dedicated admin role owning all webhook objects.
DO
$$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'supabase_functions_admin') THEN
CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
END IF;
END
$$;
GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
GRANT supabase_functions_admin TO postgres;
-- Remove the legacy supabase_pg_net_admin role if present.
DO
$$
BEGIN
IF EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'supabase_pg_net_admin') THEN
REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
DROP OWNED BY supabase_pg_net_admin;
DROP ROLE supabase_pg_net_admin;
END IF;
END
$$;
-- Harden pg_net functions (SECURITY DEFINER, pinned search_path,
-- explicit grants) when the extension is installed.
DO
$$
BEGIN
IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_net') THEN
GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
END IF;
END
$$;
-- Re-apply the same pg_net hardening whenever CREATE EXTENSION runs
-- (via the event trigger installed below).
CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
RETURNS event_trigger
LANGUAGE plpgsql
AS $$
BEGIN
IF EXISTS (
SELECT 1 FROM pg_event_trigger_ddl_commands() AS ev
JOIN pg_extension AS ext ON ev.objid = ext.oid
WHERE ext.extname = 'pg_net'
) THEN
GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
END IF;
END;
$$;
COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
DO
$$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_event_trigger WHERE evtname = 'issue_pg_net_access') THEN
CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
EXECUTE PROCEDURE extensions.grant_pg_net_access();
END IF;
END
$$;
INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
ALTER function supabase_functions.http_request() SECURITY DEFINER;
ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
COMMIT;

View file

@ -0,0 +1,255 @@
# Vector's built-in API server, listening on all interfaces on port 9001.
api:
enabled: true
address: 0.0.0.0:9001
sources:
# Tail logs from every Docker container on the host, excluding the
# supabase-vector container itself (presumably to avoid Vector ingesting
# its own log output -- confirm).
docker_host:
type: docker_logs
exclude_containers:
- supabase-vector
# Pipeline: docker_host -> project_logs (normalise fields) -> router (split
# per Supabase service) -> per-service parsers below -> HTTP sinks (`sinks`).
transforms:
# Tag every event with the project, rename message/container_name to the
# downstream field names, and delete Docker metadata that is not forwarded.
# NOTE(review): `del(.stream)` appears twice; the second call is redundant.
project_logs:
type: remap
inputs:
- docker_host
source: |-
.project = "default"
.event_message = del(.message)
.appname = del(.container_name)
del(.container_created_at)
del(.container_id)
del(.source_type)
del(.stream)
del(.label)
del(.image)
del(.host)
del(.stream)
# Fan events out into one named route per service, keyed on container name.
router:
type: route
inputs:
- project_logs
route:
kong: '.appname == "supabase-kong"'
auth: '.appname == "supabase-auth"'
rest: '.appname == "supabase-rest"'
realtime: '.appname == "realtime-dev.supabase-realtime"'
storage: '.appname == "supabase-storage"'
functions: '.appname == "supabase-edge-functions"'
db: '.appname == "supabase-db"'
# Kong access log: parse nginx "combined" format and split the request line
# into method/path/protocol. Lines that fail to parse are aborted on this
# branch (error-format lines are handled by kong_err instead).
kong_logs:
type: remap
inputs:
- router.kong
source: |-
req, err = parse_nginx_log(.event_message, "combined")
if err == null {
.timestamp = req.timestamp
.metadata.request.headers.referer = req.referer
.metadata.request.headers.user_agent = req.agent
.metadata.request.headers.cf_connecting_ip = req.client
.metadata.response.status_code = req.status
url, split_err = split(req.request, " ")
if split_err == null {
.metadata.request.method = url[0]
.metadata.request.path = url[1]
.metadata.request.protocol = url[2]
}
}
if err != null {
abort
}
# Kong error log: parse nginx "error" format; method and status code get
# placeholder defaults first. Non-matching lines are aborted.
kong_err:
type: remap
inputs:
- router.kong
source: |-
.metadata.request.method = "GET"
.metadata.response.status_code = 200
parsed, err = parse_nginx_log(.event_message, "error")
if err == null {
.timestamp = parsed.timestamp
.severity = parsed.severity
.metadata.request.host = parsed.host
.metadata.request.headers.cf_connecting_ip = parsed.client
url, err = split(parsed.request, " ")
if err == null {
.metadata.request.method = url[0]
.metadata.request.path = url[1]
.metadata.request.protocol = url[2]
}
}
if err != null {
abort
}
# Auth (GoTrue) emits JSON lines: merge the parsed object into .metadata.
auth_logs:
type: remap
inputs:
- router.auth
source: |-
parsed, err = parse_json(.event_message)
if err == null {
.metadata.timestamp = parsed.time
.metadata = merge!(.metadata, parsed)
}
# PostgREST lines look like "<time>: <msg>"; split them and parse the time
# with the strptime format below.
rest_logs:
type: remap
inputs:
- router.rest
source: |-
parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
if err == null {
.event_message = parsed.msg
.timestamp = parse_timestamp!(value: parsed.time,format: "%d/%b/%Y:%H:%M:%S %z")
.metadata.host = .project
}
# Drop Realtime health-check noise before parsing.
realtime_logs_filtered:
type: filter
inputs:
- router.realtime
condition: '!contains(string!(.event_message), "/health")'
# Realtime lines look like "H:M:S.frac [LEVEL] msg"; extract level and msg.
realtime_logs:
type: remap
inputs:
- realtime_logs_filtered
source: |-
.metadata.project = del(.project)
.metadata.external_id = .metadata.project
parsed, err = parse_regex(.event_message, r'^(?P<time>\d+:\d+:\d+\.\d+) \[(?P<level>\w+)\] (?P<msg>.*)$')
if err == null {
.event_message = parsed.msg
.metadata.level = parsed.level
}
# Edge functions: only move the project tag into metadata.
functions_logs:
type: remap
inputs:
- router.functions
source: |-
.metadata.project_ref = del(.project)
# Storage emits JSON lines; lift msg/level/time/hostname/pid into metadata.
storage_logs:
type: remap
inputs:
- router.storage
source: |-
.metadata.project = del(.project)
.metadata.tenantId = .metadata.project
parsed, err = parse_json(.event_message)
if err == null {
.event_message = parsed.msg
.metadata.level = parsed.level
.metadata.timestamp = parsed.time
.metadata.context[0].host = parsed.hostname
.metadata.context[0].pid = parsed.pid
}
# Postgres: pull the severity token out of the line; lines without a match
# default to "info", which is then normalised to "log" and uppercased.
db_logs:
type: remap
inputs:
- router.db
source: |-
.metadata.host = "db-default"
.metadata.parsed.timestamp = .timestamp
parsed, err = parse_regex(.event_message, r'.*(?P<level>INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC?):.*', numeric_groups: true)
if err != null || parsed == null {
.metadata.parsed.error_severity = "info"
}
if parsed.level != null {
.metadata.parsed.error_severity = parsed.level
}
if .metadata.parsed.error_severity == "info" {
.metadata.parsed.error_severity = "log"
}
.metadata.parsed.error_severity = upcase!(.metadata.parsed.error_severity)
# One HTTP sink per service stream: POST JSON-encoded events to the local
# Logflare ("analytics") container, each targeting a named Logflare source.
# Every sink authenticates via the x-api-key header and retries with
# backoff (initial 1s, max retry duration 30s).
sinks:
logflare_auth:
type: 'http'
inputs:
- auth_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod'
logflare_realtime:
type: 'http'
inputs:
- realtime_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod'
logflare_rest:
type: 'http'
inputs:
- rest_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod'
logflare_db:
type: 'http'
inputs:
- db_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
uri: 'http://analytics:4000/api/logs?source_name=postgres.logs'
logflare_functions:
type: 'http'
inputs:
- functions_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs'
logflare_storage:
type: 'http'
inputs:
- storage_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2'
# Kong receives both the access-log and error-log branches.
logflare_kong:
type: 'http'
inputs:
- kong_logs
- kong_err
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod'

File diff suppressed because one or more lines are too long

View file

@ -6,8 +6,8 @@ buildGoModule rec {
src = fetchgit {
url = "https://git.cloonar.com/Paraclub/ai-mailer.git";
rev = "e88ac7caff72ffee206dc931f9e16b460d205f7e";
sha256 = "sha256-eafDeXvslj3P3TOcng1zObP/Vyva7GY/eSstmKynnBI=";
rev = "1d03d584c2da3ce7ce5761f03fa0a7daadc23471";
sha256 = "sha256-kXW15cX9WTQQejAC+Rs3KQwpRHCQA34oT07RX9Vibp0=";
};
vendorHash = "sha256-cEnb629V1dylMQfmB/8qv9gl1+T72rlkEd4wcsterXE=";