Compare commits

...

7 Commits

9 changed files with 354 additions and 329 deletions

View File

@ -1,29 +0,0 @@
:PROPERTIES:
:ID: 20220218T215521.617327
:ROAM_REFS: https://github.com/hifi/heisenbridge
:END:
#+TITLE: Heisenbridge
#+ARCOLOGY_ALLOW_CRAWL: t
#+ARCOLOGY_KEY: cce/wobserver/matrix/heisenbridge
Heisenbridge is a [[id:matrix_org_ecosystem][Matrix]] app-service designed to be used as a single-user "bouncer" style IRC client rather than a "room mirror" like =matrix-appservice-irc=.
It's primarily operated via chat commands. I'm basically trusting the [[id:c75d20e6-8888-4c5a-ac97-5997e2f1c711][nixpkgs]] configuration.
,#+ARROYO_NIXOS_MODULE: nixos/heisenbridge.nix
#+ARROYO_NIXOS_ROLE: server
#+begin_src nix :tangle ~/arroyo-nix/nixos/heisenbridge.nix
{ ... }:
{
services.matrix-synapse.settings.app_service_config_files = ["/var/lib/heisenbridge/registration.yml"];
services.heisenbridge = {
enable = true;
debug = true;
homeserver = "http://localhost:8008";
owner = "@rrix:kickass.systems";
};
}
#+end_src

View File

@ -26,7 +26,22 @@ This is an [[id:arroyo/nixos][Arroyo NixOS]] module used in [[id:20211120T220054
{ ... }:
let useSSL = false; # for VM testing... should make this an option...
clientConfig."m.homeserver".base_url = "https://matrix.fontkeming.fail/";
serverConfig."m.server" = "matrix.fontkeming.fail:443";
mkWellKnown = data: ''
#
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '${builtins.toJSON data}';
'';
in {
imports = [
<arroyo/nixos/matrix-puppet-discord.nix>
<arroyo/nixos/heisenbridge.nix>
<arroyo/nixos/matrix-prometheus.nix>
];
services.postgresql.ensureDatabases = ["matrix-synapse"];
services.postgresql.ensureUsers = [
{
name = "matrix-synapse";
@ -36,41 +51,54 @@ in {
}
];
users.users.matrix-synapse.createHome = true;
users.users.matrix-synapse.home = "/svc/matrix-synapse";
users.users.matrix-synapse = {
isSystemUser = true;
createHome = true;
home = "/srv/matrix-synapse";
group = "matrix-synapse";
};
services.nginx.virtualHosts."matrix.fontkeming.fail" = {
listen = [
{ addr = "0.0.0.0"; port = 8448; }
{ addr = "0.0.0.0"; port = 80; }
# { addr = "0.0.0.0"; port = 443; ssl = true; }
];
forceSSL = useSSL;
locations."/_matrix" = {
proxyPass = "http://127.0.0.1:8008/_matrix";
extraConfig = ''
client_max_body_size 16m;
access_log off;
'';
};
locations."/_synapse/client".proxyPass = "http://127.0.0.1:8008";
locations."/_matrix".proxyPass = "http://127.0.0.1:8008";
locations."/_matrix".extraConfig = ''
client_max_body_size 16m;
access_log off;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
'';
locations."= /.well-known/matrix/server".extraConfig = mkWellKnown serverConfig;
locations."= /.well-known/matrix/client".extraConfig = mkWellKnown clientConfig;
};
services.matrix-synapse = {
enable = true;
dataDir = "/svc/matrix-synapse";
dataDir = "/srv/matrix-synapse";
settings = {
server_name = "kickass.systems";
public_baseurl = "https://matrix.fontkeming.fail";
enable_registration = false;
enable_metrics = true;
report_stats = true;
url_preview_enabled = true;
database.name = "psycopg2";
database.args.user = "synapse";
database.args.user = "matrix-synapse";
database.args.database = "synapse";
# god damnit ensureDatabases gives me a utf8
database.allow_unsafe_locale = true;
secondary_directory_servers = [
"matrix.org"
@ -112,7 +140,7 @@ in {
port = 8008;
resources = [
{ compress = true;
names = [ "client" ]; }
names = [ "client" "metrics" ]; }
{ compress = false;
names = [ "federation" ]; } ];
tls = false;
@ -125,19 +153,18 @@ in {
}
#+end_src
* CANCELLED mx-puppet-discord
* mx-puppet-discord
:PROPERTIES:
:ID: 20220218T215648.615157
:ROAM_REFS: https://github.com/matrix-discord/mx-puppet-discord
:END:
:LOGBOOK:
- State "CANCELLED" from "INPROGRESS" [2022-05-11 Wed 19:09]
:END:
=mx-puppet-discord= allows a savvy Discord user to log in to their discord guilds and chat in them through a Matrix client. Definitely don't [[https://github.com/matrix-discord/mx-puppet-discord#linking-your-discord-account][do a crimes]] with it, I'm sure I'll be banned for this indiscretion some day but this bridging is the core value proposition of Matrix for me.
It's primarily operated via chat commands. I am mostly trusting the default [[id:c75d20e6-8888-4c5a-ac97-5997e2f1c711][nixpkgs]] configuration.
One thing I had to do to make this configuration work is to manually move that =app_service_config_file= from =/var/lib/mx-puppet-discord/discord-registration.yaml=. Since it's written by a [[https://0pointer.net/blog/dynamic-users-with-systemd.html][SystemD =DynamicUser=]], it's not clear to me how to write this file with a group ID that Synapse shares.
#+begin_src nix :tangle ~/arroyo-nix/nixos/matrix-puppet-discord.nix
{ ... }:
@ -145,22 +172,168 @@ It's primarily operated via chat commands. I am mostly trusting the default [[id
services.mx-puppet-discord.enable = true;
# services.matrix-synapse.extraConfig = ''
# '';
services.matrix-synapse.settings.app_service_config_files = ["/srv/matrix-synapse/discord-registration.yaml"];
services.mx-puppet-discord.settings = {
database.filename = "/svc/matrix-puppet-discord/database.db";
bridge = {
port = 8091;
bindAddress = 0.0.0.0;
bindAddress = "0.0.0.0";
domain = "kickass.systems";
homeserverUrl = "https://matrix.fontkeming.fail";
homeserverUrl = "http://127.0.0.1:8008";
};
provisioning.whitelist = ["@rrix:kickass\\.systems"];
};
}
#+end_src
* Heisenbridge
:PROPERTIES:
:ID: 20220218T215521.617327
:ROAM_REFS: https://github.com/hifi/heisenbridge
:END:
Heisenbridge is a [[id:matrix_org_ecosystem][Matrix]] app-service designed to be used as a single-user "bouncer" style IRC client rather than a "room mirror" like =matrix-appservice-irc=.
It's primarily operated via chat commands. I'm basically trusting the [[id:c75d20e6-8888-4c5a-ac97-5997e2f1c711][nixpkgs]] configuration.
#+begin_src nix :tangle ~/arroyo-nix/nixos/heisenbridge.nix
# Heisenbridge: a single-user "bouncer"-style IRC bridge app-service for
# Matrix, operated via chat commands. Mostly the stock nixpkgs module config.
{ ... }:
{
  # Hand the bridge's generated app-service registration file to Synapse.
  services.matrix-synapse.settings.app_service_config_files = ["/var/lib/heisenbridge/registration.yml"];
  users.users.matrix-synapse.extraGroups = ["heisenbridge"]; # to access registration file
  services.heisenbridge = {
    enable = true;
    debug = true; # verbose logging — presumably left on for troubleshooting; confirm before long-term use
    homeserver = "http://localhost:8008"; # Synapse client listener on the same host
    owner = "@rrix:kickass.systems";
  };
}
#+end_src
* Synapse Prometheus Recording Rules
:PROPERTIES:
:END:
#+begin_src nix :tangle ~/arroyo-nix/nixos/matrix-prometheus.nix
# Load the Synapse recording rules (tangled elsewhere in this document
# to arroyo/files/synapse.rules) into the Prometheus server.
{ ... }:
{
  services.prometheus.ruleFiles = [
    <arroyo/files/synapse.rules>
  ];
}
#+end_src
#+begin_src yaml :tangle ~/arroyo-nix/files/synapse.rules
# Prometheus recording rules for Synapse, precomputing the aggregate
# series that the upstream Synapse Grafana dashboards query.
groups:
- name: synapse_federation_transaction_queue_pendingEdus:total
  rules:
  - record: synapse_federation_transaction_queue_pendingEdus:total
    expr: sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
- name: synapse_federation_transaction_queue_pendingPdus:total
  rules:
  - record: synapse_federation_transaction_queue_pendingPdus:total
    expr: sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
- name: synapse_http_server_requests:methodservlet
  rules:
  - record: synapse_http_server_requests:method
    expr: sum(synapse_http_server_requests) by (method)
    labels:
      servlet: ""
- name: synapse_http_server_requests:servletmethod
  rules:
  - record: synapse_http_server_requests:servlet
    expr: sum(synapse_http_server_requests) by (servlet)
    labels:
      method: ""
- name: synapse_http_server_requests:totalservlet
  rules:
  - record: synapse_http_server_requests:total
    expr: sum(synapse_http_server_requests:by_method) by (servlet)
    labels:
      servlet: ""
- name: synapse_cache:hit_ratio_5m
  rules:
  - record: synapse_cache:hit_ratio_5m
    expr: rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
- name: synapse_cache:hit_ratio_30s
  rules:
  - record: synapse_cache:hit_ratio_30s
    expr: rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
- name: synapse_federation_client_senttypeEDU
  rules:
  - record: synapse_federation_client_sent
    expr: synapse_federation_client_sent_edus + 0
    labels:
      type: EDU
- name: synapse_federation_client_senttypePDU
  rules:
  - record: synapse_federation_client_sent
    expr: synapse_federation_client_sent_pdu_destinations:count + 0
    labels:
      type: PDU
- name: synapse_federation_client_senttypeQuery
  rules:
  - record: synapse_federation_client_sent
    expr: sum(synapse_federation_client_sent_queries) by (job)
    labels:
      type: Query
- name: synapse_federation_server_receivedtypeEDU
  rules:
  - record: synapse_federation_server_received
    expr: synapse_federation_server_received_edus + 0
    labels:
      type: EDU
- name: synapse_federation_server_receivedtypePDU
  rules:
  - record: synapse_federation_server_received
    expr: synapse_federation_server_received_pdus + 0
    labels:
      type: PDU
- name: synapse_federation_server_receivedtypeQuery
  rules:
  - record: synapse_federation_server_received
    expr: sum(synapse_federation_server_received_queries) by (job)
    labels:
      type: Query
- name: synapse_federation_transaction_queue_pendingtypeEDU
  rules:
  - record: synapse_federation_transaction_queue_pending
    expr: synapse_federation_transaction_queue_pending_edus + 0
    labels:
      type: EDU
- name: synapse_federation_transaction_queue_pendingtypePDU
  rules:
  - record: synapse_federation_transaction_queue_pending
    expr: synapse_federation_transaction_queue_pending_pdus + 0
    labels:
      type: PDU
#+end_src
* NEXT matrix-dimension
* NEXT mx-puppet-slack
* NEXT validate database setup
* NEXT evaluate mautrix services?
* NEXT evaluate matrix-homeserver
https://github.com/queezle42/matrix-homeserver

View File

@ -17,9 +17,12 @@
#+ARROYO_MODULE_WANTS: cce/async_forever.org
#+ARROYO_MODULE_WANTED: cce/run_hooks_after_init.org
#+begin_src emacs-lisp
#+begin_src emacs-lisp :tangle mbsync.el
(provide 'cce/mbsync)
(add-hook #'after-cce-hook (lambda () (cce/async-forever "date && mbsync -a" "*mbsync*" 600)))
(defun cce-run-mbsync-forever ()
(interactive)
(cce/async-forever "date && mbsync -a" "*mbsync*" 600))
(add-hook #'after-cce-hook #'cce-run-mbsync-forever)
#+end_src
I have mixed feelings about using [[http://isync.sourceforge.net/mbsync.html][mbsync]] in my [[id:26c9e4fd-4501-4b8b-95ce-a2a5230d7c1e][Email and News and Information Pipelines]], I've had enough issues with mail integrity that it's useful to have a [[id:cce/python][Python]] thing that I can muck around inside of with print statements to figure out what the fuck my Maildir did to itself. I have negative feelings towards the way the project is operated, and the fact that the best documentation of the project is a bunch of semi-hostile threads on a Sourceforge forum, and I'm not excited for Master/Slave terminology in a sync program...
@ -35,6 +38,10 @@ Here's the one for getting things from my server to my laptop. Most folks who se
{ config, pkgs, ... }:
{
imports = [
./mbsync-configuration.nix
./imapnotify-configuration.nix
];
programs.mbsync.enable = true;
accounts.email.maildirBasePath = "/home/rrix/Maildir";
accounts.email.accounts.fastmail = {
@ -47,44 +54,70 @@ Here's the one for getting things from my server to my laptop. Most folks who se
imap = {
host = "mail.messagingengine.com";
};
};
}
#+end_src
mbsync = {
enable = true;
subFolders = "Verbatim";
The last time I used =mbsync=, I experienced issues with mail IDs in my =newsrc.eld= file, which Gnus uses to store which lists I am subscribed to, and also functions as a cache for message and folder states. The latter caching was what caused these issues, I believe, but it was really difficult to debug it. If this happens again, I may switch to [[id:fa7e9d10-a98d-4036-a668-889bd1d3ea29][offlineimap]].
extraConfig = {
account = {
Timeout = 120;
};
#+begin_src nix :tangle ~/arroyo-nix/hm/imapnotify-configuration.nix
# Watch the fastmail INBOX over IMAP and trigger a local sync when new
# mail arrives, instead of waiting for the periodic mbsync timer.
{ config, pkgs, ... }:
{
  services.imapnotify.enable = true;
  accounts.email.accounts.fastmail.imapnotify = {
    enable = true;
    boxes = ["INBOX"];
    # On notify: run server-side filters, pull everything down with mbsync,
    # then pop a desktop notification. `set -euxo pipefail` aborts loudly
    # (and traces each command) if any step fails.
    onNotify = ''
      set -euxo pipefail
      ${pkgs.imapfilter}/bin/imapfilter -c ${<arroyo/files/imapfilter.lua>}
      ${pkgs.isync}/bin/mbsync -a
      ${pkgs.libnotify}/bin/notify-send 'New mail received'
    '';
  };
}
#+end_src
#+begin_src nix :tangle ~/arroyo-nix/hm/mbsync-configuration.nix
{ config, pkgs, ... }:
{
accounts.email.accounts.fastmail.mbsync = {
enable = true;
subFolders = "Verbatim";
extraConfig = {
account = {
Timeout = 120;
};
};
groups = {
all = {
channels = {
top = {
extraConfig.Create = "near";
patterns = [
"INBOX"
"1ml"
"1ml/bcz"
"1ml/friendsofsecurityplanner"
"Junk Mail"
"RecruitingSpam"
"Sent Mail"
"emacsconf"
"fedora/bugs"
"github"
"newsletters"
"phoenix-lug"
"social"
];
};
rest = {
extraConfig.Create = "near";
patterns = [
"*"
];
};
groups = {
all = {
channels = {
top = {
extraConfig.Create = "near";
patterns = [
"INBOX"
"1ml"
"1ml/bcz"
"1ml/friendsofsecurityplanner"
"Junk Mail"
"RecruitingSpam"
"Sent Mail"
"emacsconf"
"fedora/bugs"
"github"
"newsletters"
"phoenix-lug"
"social"
];
};
rest = {
extraConfig.Create = "near";
patterns = [
"*"
];
};
};
};
@ -92,6 +125,3 @@ Here's the one for getting things from my server to my laptop. Most folks who se
};
}
#+end_src
The last time I used =mbsync=, I experienced issues with mail IDs in my =newsrc.eld= file, which Gnus uses to store which lists I am subscribed to, and also functions as a cache for message and folder states. The latter caching was what caused these issues, I believe, but it was really difficult to debug it. If this happens again, I may switch to [[id:fa7e9d10-a98d-4036-a668-889bd1d3ea29][offlineimap]].

View File

@ -11,13 +11,14 @@ I use [[https://pipewire.org/][PipeWire]] because it implements [[id:jack_audio_
#+ARROYO_NIXOS_MODULE: nixos/audio.nix
#+ARROYO_NIXOS_ROLE: endpoint
#+ARROYO_NIXOS_ROLE: settop
#+ARROYO_HOME_MODULE: hm/audio.nix
#+ARCOLOGY_ALLOW_CRAWL: t
#+begin_src nix :tangle ~/arroyo-nix/nixos/audio.nix
{ pkgs, ... }:
with pkgs; {
environment.systemPackages = [ qjackctl easyeffects ];
environment.systemPackages = [ qjackctl easyeffects pulsemixer ];
hardware.bluetooth = {
enable = true;
@ -39,3 +40,12 @@ with pkgs; {
};
}
#+end_src
#+begin_src nix :tangle ~/arroyo-nix/hm/audio.nix
# Home Manager audio module; the easyeffects user service is kept
# disabled for now (see the corresponding NixOS module above).
{ ... }:
{
  services.easyeffects.enable = false;
  # programs.dconf.enable = true;
}
#+end_src

View File

@ -62,17 +62,17 @@ My =org-roam= configuration is basically pedestrian, I can hit =<SPC>r= to get t
:bind (:map evil-leader--default-map :prefix "r" :prefix-map org-roam-prefix-map
("aa" . org-roam-alias-add)
("ar" . org-roam-alias-remove)
("ka" . org-roam-ref-add)
("kr" . org-roam-ref-remove)
("Ta" . org-roam-tag-add)
("Tr" . org-roam-tag-remove)
("f" . org-roam-node-find)
("g" . org-roam-show-graph)
("i" . org-roam-node-insert)
("k" . org-roam-ref-find)
("Ka" . org-roam-ref-add)
("Kr" . org-roam-ref-remove)
("K" . org-roam-ref-find)
("l" . org-roam-buffer-toggle)
("L" . org-roam-buffer-display-dedicated)
("r" . org-roam-node-random)
("Ta" . org-roam-tag-add)
("Tr" . org-roam-tag-remove))
("r" . org-roam-node-random))
:bind (:map evil-insert-state-map
("M-." . org-roam-node-insert)
("C-c r" . org-roam-node-insert)))

View File

@ -44,6 +44,17 @@ let
cp -r ./* $out/af_readability
'';
};
fever = pkgs.stdenv.mkDerivation rec {
pname = "tt-rss-plugin-fever";
version = "0.1.0";
src = pkgs.callPackage pkgs.lib.pkgVersions.fever_plugin {};
installPhase = ''
mkdir -p $out/fever
cp -r ./* $out/fever
'';
};
in {
services.tt-rss = {
enable = true;
@ -58,6 +69,7 @@ in {
wallabag
large_apod
readability
fever
];
plugins = [
"auth_internal"
@ -65,11 +77,18 @@ in {
"wallabag_v2"
"large_apod"
"af_readability"
"fever"
];
};
}
#+end_src
* Using =tt-rss= in [[id:cce/emacs][Emacs]]
:PROPERTIES:
:ID: 20231020T121209.730277
:END:
#+ARROYO_EMACS_MODULE: ttrss
#+begin_src emacs-lisp :tangle ~/org/cce/ttrss.el
@ -78,10 +97,8 @@ in {
(use-package elfeed-protocol
:config
(setq elfeed-use-curl t)
(setq elfeed-log-level 'debug)
(setq elfeed-protocol-log-trace t)
(elfeed-set-timeout 36000)
(setq elfeed-protocol-ttrss-maxsize 200)
; (setq elfeed-feeds '("ttrss+https://user:pass@feeds.whatthefuck.computer" ))
(elfeed-protocol-enable)
)
(elfeed-protocol-enable))
#+end_src

View File

@ -9,189 +9,10 @@
The [[https://github.com/sloonz/ua][Universal Aggregator]] is a powerful collection of tools designed to take a feed of data-items and store them in a =Maildir= folder. This can be used to create human-legible archive of messages, twitter posts, rss feeds and various other scraped-data. It is a suite of [[id:116cea50-60bd-4a9b-97e4-0f988cdafc56][Golang]] operator programs and some scrapers written in [[id:84e53104-c534-4963-ae42-8197bb54f8e2][JavaScript]] which I do not use.
* Container build using [[id:ea955c0f-a757-4f1f-ac80-fc2248c33e4b][Ansible Bender]] and [[id:cce/podman][Podman]]
This uses [[id:cce/dynamic_ansible_bender_playbooks][Dynamic Ansible Bender playbooks]].
#+begin_src shell :results drawer
cat <<EOF | python ~/org/cce/make-container-playbook.py
template: ~/org/cce/containers/simple.yml
friendly: Universal Aggregator
service_name: ua
cmd: ggs /data/ggsrc
build_roles:
- universal-aggregator-build
build_reqs:
- git
- make
- golang
- glib2-devel
- libxml2-devel
- python-devel
- python-pip
task_tags:
- comms
- universal-aggregators
EOF
#+end_src
#+results:
:results:
[[shell:ANSIBLE_ROLES_PATH=~/org/cce/roles ansible-bender build /home/rrix/org/cce/containers/ua/build.yml][Execute =ANSIBLE_ROLES_PATH=~/org/cce/roles ansible-bender build /home/rrix/org/cce/containers/ua/build.yml=]]
:end:
The Dockerfile I originally wrote for this uses =gosu= to set the UID and GID to my user; instead systemd/podman will just run this container as my user. the container can be started with =/usr/bin/ggs ggsrc-path= where ggsrc-path is probably mounted in to the container for my own sake.
Installing UA is a simple Makefile-based affair:
#+begin_src yaml :tangle roles/universal-aggregator-build/tasks/main.yml
# Ansible Bender build tasks for the Universal Aggregator container image:
# clone the personal branch, build with make, install, then clean up.
- name: ua cloned and up to date on personal branch
  become: yes
  become_user: "{{local_account}}"
  git:
    repo: https://code.rix.si/upstreams/ua/
    dest: "{{build_dir}}"
    version: rrix
- name: compile universal aggregator
  become: yes
  become_user: "{{local_account}}"
  shell:
    chdir: "{{build_dir}}"
    cmd: make
    # NOTE(review): `creates` normally takes a single path, not a list —
    # confirm this is honored as intended by the ansible version in use.
    creates:
      - "{{build_dir}}/ua-inline/ua-inline"
      - "{{build_dir}}/ua-maildir/ua-maildir"
      - "{{build_dir}}/ggs/ggs"
- name: install universal aggregator
  shell:
    chdir: "{{build_dir}}"
    cmd: make install
    creates: /usr/local/bin/ua-inline
- name: make clean
  tags:
    - postbuild
  shell:
    chdir: "{{build_dir}}"
    cmd: make clean
- name: jq installed
  dnf:
    state: installed
    name: jq
#+end_src
My UA configuration uses tweepy and a custom twitter client to pull tweets in to my Maildir, make sure that ends up in the image even if I don't use it right now.
#+begin_src yaml :tangle roles/universal-aggregator-build/tasks/main.yml
# Install the custom Twitter scraper and its Python dependencies into the
# container image so it is available even when unused.
- name: tweets.py is installed
  template:
    src: tweets.py
    dest: /usr/local/bin/tweets.py
- name: tweets.py deps installed
  pip:
    state: present
    name:
      - tweepy
      - click
      - twitter-text-python
#+end_src
Need a way to inject the access tokens; template + ansible-vault probably!
#+begin_src python :tangle containers/ua/tweets.py
from __future__ import print_function
import tweepy
from email import utils
import time
import json
import click
from ttp import ttp
from ttp import utils as twutils
# auth = tweepy.OAuthHandler("", "")
# auth.set_access_token("", "")
api = tweepy.API(auth)
parser = ttp.Parser()
@click.group()
def cli():
pass
def make_2822_date(dt):
tup = dt.timetuple()
flt = time.mktime(tup)
return utils.formatdate(flt)
def render_tweet(status, retweeter=None):
parsed = parser.parse(status.full_text)
body = u'<a href="https://twitter.com/{twuser}/status/{twid}">{twuser}</a>: '.format(
twuser=status.user.screen_name,
twid=str(status.id)
)
body += parsed.html
urls = twutils.follow_shortlinks(parsed.urls)
for small, rest in urls.items():
body = body.replace(small, rest[-1])
references = None
if retweeter:
body += u'<br/> Retweeted by <a href="https://twitter.com/@{twuser}/">{twname}</a>.'.format(twuser=retweeter.screen_name,
twname=retweeter.name)
if status.entities.get("media") and len(status.entities["media"]) > 0:
for medium in (status.entities["media"]):
body += u'<br/><img src="{twimg}"/>'.format(
twimg=medium[u"media_url_https"]
)
if status.in_reply_to_status_id:
body += u'<br/> <a href="https://twitter.com/{twuser}/status/{twid}">in reply to {twuser}</a>'.format(
twuser=status.in_reply_to_screen_name,
twid=status.in_reply_to_status_id_str
)
references = [status.in_reply_to_status_id_str]
return {
'author': status.author.name,
'title': status.full_text,
'id': status.user.screen_name + "_" + status.id_str,
'date': make_2822_date(status.created_at),
'body': body,
'references': references,
'authorEmail': status.user.screen_name + "@twitter.com"
}
def process_tweet(status):
if status._json.get("retweeted_status"):
return render_tweet(status.retweeted_status, retweeter=status.user)
else:
return render_tweet(status)
@cli.command()
def home():
tweets = api.home_timeline(tweet_mode="extended")
for tweet in tweets:
print(json.dumps(process_tweet(tweet)))
@cli.command()
@click.option('--owner', type=str)
@click.option('--slug', type=str)
def list(owner, slug):
tweets = api.list_timeline(owner, slug, tweet_mode='extended')
for tweet in tweets:
print(json.dumps(process_tweet(tweet)))
if __name__ == '__main__':
cli()
#+end_src
** NEXT load tokens from file...
* Usage
: this is deprecated; in theory [[id:arroyo/feed-cache][Arroyo Feed Cache Generator]] still works, but I don't use any of this any more.
Universal Aggregator is composed of a number of components, starting with the "grey goo spawner" =ggs=, which is a [[id:116cea50-60bd-4a9b-97e4-0f988cdafc56][Golang]] program designed to run commands at intervals, and is designed to handle concurrency and work-sharing reasonably. In a really remarkable set of choices, =ggs= embeds a =ggsrc= file inside of a [[https://github.com/sloonz/ua/blob/master/ggs/ggs.go#L29][CONFIG_WRAPPER]] and then runs that like it's a shell script. *The =ggsrc= file is for all intents and purposes a shell script* and I can use this to my advantage to provide multiple paths for bringing data in to the system, running the shell script with an alternate =rss= command defined for example. I want to use this fact to also provide multiple paths for *bringing data out of the system*. By having functions which are defined differently depending on whether they are being run within =ggs= or =Emacs= a system for verifying feeds and inspecting their state can be built within [[id:1fb8fb45-fac5-4449-a347-d55118bb377e][org-mode]], a small piece of [[id:128ab0e8-a1c7-48bf-9efe-0c23ce906a48][Hypermedia]] which presents the state of the feed alongside the feed itself.
The files are generated and written to my homeserver with the [[id:arroyo/feed-cache][Arroyo Feed Cache]].
@ -229,6 +50,9 @@ And some overflow:
#+ARROYO_FEEDS: ggs/70-overflow.ggs
:end:
| NWS Seattle Area Forecast Discussion | https://afd.fontkeming.fail/AFDSEW.xml | News |
| King County Metro Alerts | https://kcmetro-rss.buttslol.net/D/40 | News |
#+NAME: overflow
| Lectronice's Tokipona Blog | https://tokipona.lectronice.com/atom/d | Media |
| Polygon - All | https://www.polygon.com/rss/index.xml | Media |
@ -238,5 +62,3 @@ And some overflow:
| Privacy Enhancing Tech Symposium | https://www.youtube.com/feeds/videos.xml?channel_id=UC-m6oi7a-8LffTk64J3tq-w | Videos |
| My SongKick feeds | http://acousti.co/feeds/upcoming/songkick-670 | Art |
| NWS Seattle Area Forecast Discussion | https://afd.fontkeming.fail/AFDSEW.xml | News |
| King County Metro Alerts | https://kcmetro-rss.buttslol.net/D/40 | News |

View File

@ -67,11 +67,13 @@ homeManager = _: builtins.fetchGit {
By structuring these invocations like this it is possible to write a function contained in my [[id:20220913T104837.013589][nix-update]] page which will iterate over all the call sections and update the =builtins.fetchGit= entities, and then update the revisions and =sha256= of the rest of the document, and safely tangle the new values out on save. This is probably a useful pattern in developing [[id:128ab0e8-a1c7-48bf-9efe-0c23ce906a48][Hypermedia]] in org-mode.
* NEXT update my [[id:c75d20e6-8888-4c5a-ac97-5997e2f1c711][NixOS]] version pins and deploy
SCHEDULED: <2023-09-22 Fri .+2w>
SCHEDULED: <2023-12-13 Wed .+2w>
:PROPERTIES:
:LAST_REPEAT: [2023-09-08 Fri 18:42]
:LAST_REPEAT: [2023-11-29 Wed 17:11]
:END:
:LOGBOOK:
- State "DONE" from "NEXT" [2023-11-29 Wed 17:11]
- State "DONE" from "NEXT" [2023-11-12 Sun 21:52]
- State "DONE" from "NEXT" [2023-09-08 Fri 18:42]
- State "DONE" from "NEXT" [2023-08-17 Thu 19:00]
- State "DONE" from "NEXT" [2023-07-27 Thu 15:37]
@ -132,7 +134,7 @@ Right now I am running off a branch of [[id:c75d20e6-8888-4c5a-ac97-5997e2f1c711
#+NAME: prefetch-hm
#+results:
: "5bac4a1c06cd77cf8fc35a658ccb035a6c50cd2c"
: "28535c3a34d79071f2ccb68671971ce0c0984d7e"
#+begin_src nix :noweb-ref homeManager :noweb yes
homeManager = _: builtins.fetchGit {
@ -150,7 +152,7 @@ homeManager = _: builtins.fetchGit {
#+NAME: prefetch-em
#+results:
: "badf38fcef05e02764781847ffe498016401e5a5"
: "ffe08c51b289d92ee8d82f13dd069a54b8bdf3f1"
#+NAME: emacsOverlay
#+begin_src nix :noweb yes
@ -175,9 +177,9 @@ builds from https://codeberg.org/martianh/mastodon.el/commits/branch/main
#+begin_src nix :noweb-ref mastodon
mastodon = { pkgs, ... }: pkgs.fetchgit {
url = "https://codeberg.org/martianh/mastodon.el";
rev = "d4c105cc39315de3c9f3f29b97de0c0dec718770";
sha256 = "0jzgkbr7dmpv66cabmf8lnz3223m5vs25v06v27s1dfpy3grcxwf";
# date = "2023-08-31T12:12:05+02:00";
rev = "a8c80d25b7790746a439ae6c2deea3dc6bcac710";
sha256 = "143wmg9jhdi79y1gdi0y9xxpp8vyn7qbhvaysq1sf7g1h0jskxc2";
# date = "2023-10-30T20:22:18+01:00";
};
#+end_src
@ -213,9 +215,9 @@ consult-org-roam-rev = "268f436858e1ea3b263782af466a54e4d603a7d2";
consult-org-roam = {pkgs, ...}: pkgs.fetchFromGitHub {
owner = "jgru";
repo = "consult-org-roam";
rev = "2ca42a1c1641a29f1447d35be01bd1fda368a9e2";
sha256 = "142fra7wap6dfwd4c82j7z3nk1yw78slrwhjx6vkiql8ylbiw5fi";
# date = "2023-05-28T10:55:47+02:00";
rev = "47e43a7ffa703b1cc4c73cae953dadea4eff83df";
sha256 = "1f39k9mc8srghlbmncz811cdpzr5s5s99m9vclkda4hi7m34z93j";
# date = "2023-10-07T09:50:58+02:00";
};
#+end_src
@ -258,7 +260,7 @@ ement = rec {
src = { pkgs, ... }: pkgs.fetchurl {
url = "https://github.com/alphapapa/ement.el/archive/8aea26acefd9e3eafa24db240e41aa9d41603586.tar.gz";
sha256 = "1zs8j9zvwda029ld2lnqkw03i7zsibrdy68fpsz5ylw7czd6qfzi";
# date = "2023-09-08T18:39:41-0700";
# date = "2023-11-29T17:07:01-0800";
};
};
#+end_src
@ -408,25 +410,18 @@ ttrss_wallabag = { pkgs, ... }: pkgs.fetchFromGitHub {
# https://gitlab.tt-rss.org/tt-rss/plugins/ttrss-af-readability
ttrss_readability = { pkgs, ... }: pkgs.fetchgit {
url= "https://gitlab.tt-rss.org/tt-rss/plugins/ttrss-af-readability";
url = "https://gitlab.tt-rss.org/tt-rss/plugins/ttrss-af-readability";
rev = "cdc97d886cb7085f9c44a1796ee4bbbf57534d06";
sha256 = "sha256-Pbwp+s4G+mOwjseiejb0gbHpInc2lvR+sv85sRP/DVg=";
# date = "2021-03-14T01:26:43-05:00";
};
#+end_src
** [[id:20230629T103219.772151][Unofficial Homestuck Collection in NixOS]]
#+begin_src nix :noweb-ref tuhc
homestuck = rec {
pname = "unofficial-homestuck-collection";
version = "2.0.7";
src = { ... }: builtins.fetchurl {
url = "https://github.com/Bambosh/unofficial-homestuck-collection/releases/download/v${version}/The-Unofficial-Homestuck-Collection-${version}.AppImage";
sha256 = "error: unable to download 'https://github.com/Bambosh/unofficial-homestuck-collection/releases/download/v${version}/The-Unofficial-Homestuck-Collection-${version}.AppImage': HTTP error 404";
name = "${pname}-${version}.AppImage";
# date = "2023-09-08T18:40:49-0700";
};
sha256 = "0n0dzw9v2fgzn9zg95infwifkcc1yhv7m8n7isq67yh6rvx2kg1x";
# date = "2023-04-02T19:07:22+03:00";
};
fever_plugin = { pkgs, ... }: pkgs.fetchFromGitHub {
owner = "DigitalDJ";
repo = "tinytinyrss-fever-plugin";
rev = "cc297d8939f91e53bf873f8d982a7869916483df";
sha256 = "0qryndbh8liwyd6iilqagd29v0awmg8il4qrpdg1khac8nxa1cb9";
# date = "2023-10-28T00:29:41+10:30";
};
#+end_src

View File

@ -32,22 +32,23 @@ I need to set up alerts and dashboards for the most common operations, and I'd l
with pkgs.lib;
let mkStaticScrape =
(name: cfg:
let addr =
if hasAttr "listenAddr" cfg then
cfg.listenAddr
else
"localhost";
in
{
job_name = name;
static_configs = [
{ targets = ["${addr}:${toString cfg.port}"]; }
];
}
);
(name: cfg:
let addr =
if hasAttr "listenAddr" cfg then
cfg.listenAddr
else
"localhost";
in
{
job_name = name;
static_configs = [
{ targets = ["${addr}:${toString cfg.port}"]; }
];
}
);
in
rec {
programs.atop.enable = true;
services.prometheus = {
enable = true;
retentionTime = "60d";
@ -69,6 +70,11 @@ rec {
job_name = "arcology";
static_configs = [{ targets = ["localhost:8000"]; }];
}
{
job_name= "synapse";
metrics_path= "/_synapse/metrics";
static_configs = [{ targets = ["localhost:8008"]; }];
}
# gitea
];
};
@ -147,13 +153,14 @@ rec {
services.nginx.virtualHosts."home.rix.si" = {
locations."/prom" = {
proxyPass = "http://${config.services.prometheus.listenAddress}:${toString config.services.prometheus.port}";
proxyPass = "http://${config.services.prometheus.listenAddress}:${toString config.services.prometheus.port}/prom";
extraConfig = ''
auth_basic "closed site";
auth_basic_user_file /etc/nginx-htpasswd;
auth_basic_user_file /srv/nginx-htpasswd;
'';
};
locations."/grafana" = {
proxyPass = "http://${config.services.grafana.settings.server.http_addr}:${toString config.services.grafana.settings.server.http_port}/grafana";
extraConfig = ''