8.9 KiB
8.9 KiB
Deploying the Arcology
- NEXT Bootstrapping on non-NixOS
- Running on The Wobserver, self-hosting Arcology with The Arroyo Generators
- NixOS module
- finish this
- validate the environment variables are used
- NEXT consider generating a local settings.py with our configuration overrides
- NEXT figure out better way to call in to Arcology and Arroyo in the service definition
- NEXT service hardening
- static files under gunicorn/nginx
- secret infrastructure for the syncthing key
- NixOS module
NEXT Bootstrapping on non-NixOS
nix run
Use the ingestfiles and generator commands to stand up a system; this is core to the Rebuild of The Complete Computer.
Running on The Wobserver, self-hosting Arcology with The Arroyo Generators
Package building is handled in the Arcology Project Scaffolding.
Deployment declaration is in the Arcology Project Configuration.
NixOS module
provide it in the flake.nix
too…
We need two services: the watchsync server and a gunicorn server. We need a virtualhost.
# NixOS module for the Arcology Django application.
# Provides two systemd services (a Syncthing watcher and a gunicorn web
# server), an optional nginx reverse-proxy virtualhost, and a dedicated
# system user/group with statically allocated uid/gid.
{ lib, config, pkgs, ... }:

with lib;

let
  cfg = config.services.arcology-ng;

  # Process environment shared by both services.
  # NOTE(review): might want to generate a localsettings.py or so to configure
  # the application for deployment, rather than use the process environment.
  env = {
    ARCOLOGY_ENVIRONMENT = cfg.environment;
    ARCOLOGY_BASE_DIR = cfg.orgDir;
    ARCOLOGY_STATIC_ROOT = cfg.staticRoot;
    ARCOLOGY_DB_PATH = "${cfg.dataDir}/databases/arcology2.db";
    ARCOLOGY_ALLOWED_HOSTS = concatStringsSep "," cfg.domains;
    ARCOLOGY_LOG_LEVEL = cfg.logLevel;
    ARCOLOGY_CACHE_PATH = cfg.cacheDir;
    PROMETHEUS_MULTIPROC_DIR = cfg.multiProcDir;
    GUNICORN_CMD_ARGS = "--bind=${cfg.address}:${toString cfg.port} -w ${toString cfg.workerCount}";
  };

  # Python interpreter with the arcology package on its path, so that
  # `python -m gunicorn` can import arcology.wsgi.
  pyenv = pkgs.python3.withPackages (pp: [ cfg.packages.arcology ]);

  # Activation script plus the two systemd units.
  svcConfig = {
    # Collect Django static files and ensure the state directories exist and
    # are owned by the service user before the units start.
    system.activationScripts.arcology-collectfiles.text = ''
      echo "Setting up Arcology static files"
      ARCOLOGY_STATIC_ROOT=${cfg.staticRoot} ${cfg.packages.arcology}/bin/arcology collectstatic --no-input -c -v0
      echo "Ensuring Arcology directories exist"
      mkdir -p ${cfg.dataDir} ${cfg.multiProcDir} ${cfg.cacheDir}
      chown arcology:arcology ${cfg.dataDir} ${cfg.multiProcDir} ${cfg.cacheDir}
    '';

    # Watches the Syncthing folder and ingests changed org documents.
    systemd.services.arcology2-watchsync = {
      description = "Arcology Django Syncthing Watcher";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      environment = env;
      # Apply migrations and seed the database (best-effort) before watching.
      preStart = ''
        ${cfg.packages.arcology}/bin/arcology migrate
        ${cfg.packages.arcology}/bin/arcology seed || true
      '';
      script = ''
        ${cfg.packages.arcology}/bin/arcology watchsync -f ${cfg.folderId}
      '';
      serviceConfig = {
        Type = "simple";
        User = "arcology";
        Group = "arcology";
        WorkingDirectory = cfg.dataDir;
        EnvironmentFile = cfg.environmentFile;
        Restart = "on-failure";
        RestartSec = 5;
        RestartSteps = 10;
        RestartMaxDelaySec = "1min";
        # hardening...
      };
    };

    # Serves the Django application via gunicorn.
    systemd.services.arcology2-web = {
      description = "Arcology Django Gunicorn";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      environment = env;
      # Clear stale prometheus multiprocess metric files from prior runs so
      # they do not skew the coalesced exports.
      preStart = ''
        find ${cfg.multiProcDir} -type f -delete
      '';
      script = ''
        ${pyenv}/bin/python -m gunicorn arcology.wsgi
      '';
      serviceConfig = {
        Type = "simple";
        User = "arcology";
        Group = "arcology";
        WorkingDirectory = cfg.dataDir;
        EnvironmentFile = cfg.environmentFile;
        Restart = "on-failure";
        RestartSec = 5;
        RestartSteps = 10;
        RestartMaxDelaySec = "1min";
        # hardening...
      };
    };
  };

  # Optional nginx reverse proxy: the first domain is the primary server
  # name, the rest become aliases. The mkIf gates the whole virtualHosts
  # attrset so `head cfg.domains` is never evaluated when disabled (the
  # enclosing `config = mkIf cfg.enable …` already handles cfg.enable).
  domainVHosts = {
    services.nginx.virtualHosts = mkIf cfg.generateVirtualHosts {
      "${head cfg.domains}" = {
        serverAliases = tail cfg.domains;
        locations."/" = {
          proxyPass = "http://${cfg.address}:${toString cfg.port}";
          extraConfig = ''
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Host $http_host;
            proxy_set_header Host $host;
          '';
        };
        # Serve the collected static files straight from disk, bypassing
        # gunicorn. With a location ending in "/", alias must too.
        locations."/static/".alias = cfg.staticRoot;
      };
    };
  };

  # Dedicated service user/group with a statically allocated uid/gid.
  userConfig = {
    ids.uids.arcology = 900;
    ids.gids.arcology = 900;
    users.users.arcology = {
      group = "arcology";
      home = cfg.dataDir;
      createHome = true;
      shell = "${pkgs.bash}/bin/bash";
      isSystemUser = true;
      uid = config.ids.uids.arcology;
    };
    users.groups.arcology = {
      gid = config.ids.gids.arcology;
    };
  };
in {
  options = {
    services.arcology-ng = {
      enable = mkEnableOption "arcology-ng";
      packages.arcology = mkOption {
        type = types.package;
        description = mdDoc ''
          The built Arcology package providing the `arcology` management CLI.
        '';
      };
      domains = mkOption {
        type = types.listOf types.str;
        description = mdDoc ''
          Domains the site is served under. The first entry becomes the
          primary nginx server name (the rest are aliases), and all of them
          are passed to Django as ARCOLOGY_ALLOWED_HOSTS.
        '';
      };
      address = mkOption {
        type = types.str;
        default = "localhost";
        description = mdDoc "Web interface address.";
      };
      port = mkOption {
        type = types.port;
        default = 29543;
        description = mdDoc "Web interface port.";
      };
      environment = mkOption {
        type = types.enum [ "production" "development" ];
        default = "production";
        description = mdDoc "Django deployment environment.";
      };
      workerCount = mkOption {
        # gunicorn requires a positive integer worker count.
        type = types.ints.positive;
        default = 16;
        description = mdDoc "gunicorn worker count; they recommend 2-4 workers per core.";
      };
      generateVirtualHosts = mkOption {
        type = types.bool;
        default = true;
        description = mdDoc "control whether nginx virtual hosts should be created";
      };
      dataDir = mkOption {
        type = types.path;
        default = "/var/lib/arcology";
        description = mdDoc ''
          Directory to store Arcology cache files, database, etc. Service User's home directory.
        '';
      };
      logLevel = mkOption {
        type = types.enum [ "ERROR" "WARN" "INFO" "DEBUG" ];
        default = "INFO";
        description = mdDoc ''
          Set the Django root logging level
        '';
      };
      environmentFile = mkOption {
        type = types.path;
        default = "${cfg.dataDir}/env";
        description = mdDoc ''
          A file containing environment variables you may not want to put in the nix store.
          For example, you could put a syncthing key
          and a bearer token for the Local API in there:
          ARCOLOGY_SYNCTHING_KEY=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA;
          ARCOLOGY_LOCALAPI_BEARER_TOKEN=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA;
        '';
      };
      staticRoot = mkOption {
        type = types.path;
        default = "/var/lib/arcology/static/";
        description = mdDoc ''
          Location where django-manage collectfiles will write the files to. If
          you let this module generate nginx virtualhosts it will be configured
          to use that for static files. Ensure this ends with a trailing slash
          ("/") so the nginx alias directive maps URLs correctly.
        '';
      };
      orgDir = mkOption {
        type = types.path;
        description = mdDoc ''
          Directory containing the org-mode documents.
          Arcology needs read-only access to this directory.
        '';
      };
      folderId = mkOption {
        type = types.str;
        description = mdDoc ''
          Syncthing folder ID containing the org files.
        '';
      };
      cacheDir = mkOption {
        type = types.path;
        default = "${cfg.dataDir}/cache/";
        description = mdDoc ''
          Location to cache HTML files and the like.
        '';
      };
      multiProcDir = mkOption {
        type = types.path;
        default = "${cfg.dataDir}/metrics/";
        description = mdDoc ''
          Location where prometheus will cache metrics to be coalesced on all workers.
          See https://github.com/korfuri/django-prometheus/blob/master/documentation/exports.md
        '';
      };
    };
  };
  config = mkIf cfg.enable (mkMerge [
    svcConfig
    domainVHosts
    # prevent double-definition of user entities from previous service's manifest
    # yanked directly from arcology-fastapi
    userConfig
  ]);
}
DONE finish this
- State "DONE" from "INPROGRESS" [2024-02-15 Thu 12:07]
- State "INPROGRESS" from "NEXT"
DONE validate the environment variables are used
- State "DONE" from "NEXT" [2024-02-15 Thu 12:07]
NEXT consider generating a local settings.py with our configuration overrides
NEXT figure out better way to call in to Arcology and Arroyo in the service definition
probably just getFlake
but augh.
NEXT service hardening
DONE static files under gunicorn/nginx
- State "DONE" from "INPROGRESS" [2024-02-15 Thu 19:44]
- State "INPROGRESS" from "NEXT" [2024-02-15 Thu 12:08]
CLOCK: [2024-02-15 Thu 12:08]–[2024-02-15 Thu 12:58] => 0:50
DONE secret infrastructure for the syncthing key
- State "DONE" from "NEXT" [2024-02-15 Thu 19:44]
or a way to load that into the DB 🤔