commit 8d098b2ab00dead74fa23af28637514e9f383431
parent 594d0c70404e0f21b5555c3bee8b036923bfe442
Author: Andreas Gruhler <andreas.gruhler@adfinis.com>
Date: Sun, 3 Nov 2024 14:59:09 +0100
feat: add pleroma
Diffstat:
4 files changed, 211 insertions(+), 0 deletions(-)
diff --git a/hcl/default/pleroma/data-volume.hcl b/hcl/default/pleroma/data-volume.hcl
@@ -0,0 +1,31 @@
+# Register external nfs volume with Nomad CSI
+# https://www.nomadproject.io/docs/commands/volume/register
+type = "csi"
+# Unique ID of the volume, volume.source field in a job
+id = "pleroma"
+# Display name of the volume.
+name = "pleroma"
+# ID of the physical volume from the storage provider
+external_id = "csi-pleroma"
+plugin_id = "nfs"
+
+# You must provide at least one capability block
+# You must provide a block for each capability
+# you intend to use in a job's volume block
+# https://www.nomadproject.io/docs/commands/volume/register
+capability {
+ access_mode = "multi-node-multi-writer"
+ attachment_mode = "file-system"
+}
+
+# https://github.com/kubernetes-csi/csi-driver-nfs/blob/master/docs/driver-parameters.md
+context {
+ server = "turris"
+ share = "csi-pleroma"
+}
+
+mount_options {
+ # mount.nfs: Either use '-o nolock' to keep locks local, or start statd.
+ mount_flags = ["nolock"]
+}
+
diff --git a/hcl/default/pleroma/pleroma.nomad b/hcl/default/pleroma/pleroma.nomad
@@ -0,0 +1,90 @@
+# https://git.pleroma.social/pleroma/pleroma-docker-compose
+
+job "pleroma" {
+ datacenters = ["dc1"]
+
+ vault {}
+
+ group "server" {
+ count = 1
+
+ volume "tls" {
+ type = "csi"
+ source = "certbot"
+ access_mode = "multi-node-multi-writer"
+ attachment_mode = "file-system"
+ }
+ volume "pleroma" {
+ type = "csi"
+ source = "pleroma"
+ access_mode = "multi-node-multi-writer"
+ attachment_mode = "file-system"
+ }
+
+ network {
+ port "http" {
+ to = 4000
+ }
+ port "https" {
+ static = 44393
+ }
+ }
+
+ task "nginx" {
+ driver = "podman"
+
+ config {
+ image = "docker.io/library/nginx:stable-alpine"
+ ports = ["https"]
+ volumes = [
+ # mount the templated config from the task directory to the container
+ "local/pleroma.conf:/etc/nginx/conf.d/pleroma.conf",
+ ]
+ }
+
+ volume_mount {
+ volume = "tls"
+ destination = "/etc/letsencrypt"
+ }
+
+ template {
+ destination = "${NOMAD_TASK_DIR}/pleroma.conf"
+ data = file("./templates/nginx.conf.tmpl")
+ }
+
+ resources {
+ memory = 50
+ memory_max = 256
+ cpu = 200
+ }
+ }
+
+ task "pleroma" {
+ driver = "podman"
+
+ config {
+ image = "git.pleroma.social:5050/pleroma/pleroma:latest"
+ force_pull = true
+ ports = ["http"]
+ }
+
+ volume_mount {
+ volume = "pleroma"
+ destination = "/var/lib/pleroma/uploads"
+ }
+
+ template {
+ destination = "${NOMAD_TASK_DIR}/pleroma.env"
+ data = file("./templates/pleroma.env.tmpl")
+ env = true
+ }
+
+ resources {
+ memory = 512
+ memory_max = 1024
+ cpu = 500
+ }
+ }
+
+ }
+}
diff --git a/hcl/default/pleroma/templates/nginx.conf.tmpl b/hcl/default/pleroma/templates/nginx.conf.tmpl
@@ -0,0 +1,81 @@
+proxy_cache_path /tmp/pleroma-media-cache levels=1:2 keys_zone=pleroma_media_cache:10m max_size=10g
+ inactive=720m use_temp_path=off;
+
+# this is explicitly IPv4 since Pleroma.Web.Endpoint binds on IPv4 only
+# and `localhost.` resolves to [::0] on some systems: see issue #930
+upstream phoenix {
+ server {{ env "NOMAD_ADDR_http" }} max_fails=5 fail_timeout=60s;
+}
+
+# Enable SSL session caching for improved performance
+ssl_session_cache shared:ssl_session_cache:10m;
+
+server {
+ server_name m.in0rdr.ch;
+
+ listen {{ env "NOMAD_PORT_https" }} ssl http2;
+ listen [::]:{{ env "NOMAD_PORT_https" }} ssl http2;
+ ssl_session_timeout 1d;
+ ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
+ ssl_session_tickets off;
+
+ ssl_trusted_certificate /etc/letsencrypt/live/m.in0rdr.ch/chain.pem;
+ ssl_certificate /etc/letsencrypt/live/m.in0rdr.ch/fullchain.pem;
+ ssl_certificate_key /etc/letsencrypt/live/m.in0rdr.ch/privkey.pem;
+
+ ssl_protocols TLSv1.2 TLSv1.3;
+ ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4";
+ ssl_prefer_server_ciphers off;
+ # In case of an old server with an OpenSSL version of 1.0.2 or below,
+ # leave only prime256v1 or comment out the following line.
+ ssl_ecdh_curve X25519:prime256v1:secp384r1:secp521r1;
+ ssl_stapling on;
+ ssl_stapling_verify on;
+
+ gzip_vary on;
+ gzip_proxied any;
+ gzip_comp_level 6;
+ gzip_buffers 16 8k;
+ gzip_http_version 1.1;
+ gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript application/activity+json application/atom+xml;
+
+ # the nginx default is 1m, not enough for large media uploads
+ client_max_body_size 16m;
+ ignore_invalid_headers off;
+
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
+ location / {
+ proxy_pass http://phoenix;
+ }
+
+ # Uncomment this if you want notice compatibility routes for frontends like Soapbox.
+ # location ~ ^/@[^/]+/([^/]+)$ {
+ # proxy_pass http://phoenix/notice/$1;
+ # }
+ #
+ # location ~ ^/@[^/]+/posts/([^/]+)$ {
+ # proxy_pass http://phoenix/notice/$1;
+ # }
+ #
+ # location ~ ^/[^/]+/status/([^/]+)$ {
+ # proxy_pass http://phoenix/notice/$1;
+ # }
+
+ location ~ ^/(media|proxy) {
+ proxy_cache pleroma_media_cache;
+ slice 1m;
+ proxy_cache_key $host$uri$is_args$args$slice_range;
+ proxy_set_header Range $slice_range;
+ proxy_cache_valid 200 206 301 304 1h;
+ proxy_cache_lock on;
+ proxy_ignore_client_abort on;
+ proxy_buffering on;
+ chunked_transfer_encoding on;
+ proxy_pass http://phoenix;
+ }
+}
diff --git a/hcl/default/pleroma/templates/pleroma.env.tmpl b/hcl/default/pleroma/templates/pleroma.env.tmpl
@@ -0,0 +1,9 @@
+DB_USER={{with secret "kv/pleroma"}}{{index .Data.data.db_user}}{{end}}
+DB_PASS={{with secret "kv/pleroma"}}{{index .Data.data.db_pass}}{{end}}
+DB_HOST=postgres.lan
+DB_NAME=pleroma
+INSTANCE_NAME=Pleroma
+ADMIN_EMAIL=agruhl@gmx.ch
+NOTIFY_EMAIL=pleroma+admin@m.in0rdr.ch
+DOMAIN=m.in0rdr.ch
+PORT=4000