2c55f7d7cb
The current FedSocket implementation has a number of problems. It lacks proper error handling (on error, the server simply doesn't respond until the connection is closed, while the client doesn't match any error messages and just assumes an error occurred after 15s), and the code is full of questionable decisions (see: the fetch registry, which uses UUIDs for no reason and waits for a response by recursively querying an ETS table until the value changes, or the double JSON encoding). Some time ago I nearly finished rewriting FedSockets from scratch to address these issues. While doing so, however, I realized that FedSockets are overkill for what they were trying to accomplish: reducing the overhead of federation by not signing every message. That goal could be achieved without reimplementing the failure states and endpoint logic we already get from HTTP, for example by using TLS client certificate authentication or switching to a more performant signature algorithm. I opened https://git.pleroma.social/pleroma/pleroma/-/issues/2262 for further discussion of alternatives to FedSockets. From discussions with other Pleroma developers, it seems they would also approve of removing them, so I am submitting this patch.
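As a rough illustration of the TLS client certificate alternative mentioned above, something like the following nginx configuration could let the reverse proxy verify federating peers once per connection instead of per message. This is only a sketch, not part of this patch: the CA bundle path, the /inbox location, and the X-Client-* header names are hypothetical, and the backend would still need logic to trust the forwarded verification result.

server {
    listen 443 ssl http2;
    server_name example.tld;
    ssl_certificate /etc/letsencrypt/live/example.tld/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.tld/privkey.pem;

    # CA bundle used to verify peer certificates; 'optional' lets ordinary
    # clients without certificates connect as before.
    ssl_client_certificate /etc/nginx/federation-ca.pem;   # hypothetical path
    ssl_verify_client optional;

    location /inbox {
        # Forward the verification result so the backend could skip HTTP
        # signature checks for verified peers (backend support is assumed).
        proxy_set_header X-Client-Verify $ssl_client_verify;
        proxy_set_header X-Client-DN $ssl_client_s_dn;
        proxy_pass http://phoenix;
    }
}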
Nginx Configuration File
# default nginx site config for Pleroma
#
# Simple installation instructions:
# 1. Install your TLS certificate, possibly using Let's Encrypt.
# 2. Replace 'example.tld' with your instance's domain wherever it appears.
# 3. Copy this file to /etc/nginx/sites-available/ and then add a symlink to it
#    in /etc/nginx/sites-enabled/ and run 'nginx -s reload' or restart nginx.

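# levels=1:2 spreads cached files across two directory levels; keys_zone
# reserves 10 MB of shared memory for cache keys; max_size caps the on-disk
# cache at 10 GB; entries not accessed for 720 minutes are evicted.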
proxy_cache_path /tmp/pleroma-media-cache levels=1:2 keys_zone=pleroma_media_cache:10m max_size=10g
                 inactive=720m use_temp_path=off;

# this is explicitly IPv4 since Pleroma.Web.Endpoint binds on IPv4 only
# and `localhost.` resolves to [::0] on some systems: see issue #930
upstream phoenix {
    server 127.0.0.1:4000 max_fails=5 fail_timeout=60s;
}
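# If your Pleroma backend listens on a different address or port,
# adjust the server line above to match.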

server {
    server_name example.tld;

    listen 80;
    listen [::]:80;

    # Uncomment this if you need to use the 'webroot' method with certbot. Make sure
    # that the directory exists and that it is accessible by the webserver. If you followed
    # the guide, you already ran 'mkdir -p /var/lib/letsencrypt' to create the folder.
    # You may need to load this file with the ssl server block commented out, run certbot
    # to get the certificate, and then uncomment it.
    #
    # location ~ /\.well-known/acme-challenge {
    #     root /var/lib/letsencrypt/;
    # }

    location / {
        return 301 https://$server_name$request_uri;
    }
}

# Enable SSL session caching for improved performance
# (note: the ssl server block below overrides this with its own 'MozSSL' zone)
ssl_session_cache shared:ssl_session_cache:10m;

server {
    server_name example.tld;

    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    ssl_session_timeout 1d;
    ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
    ssl_session_tickets off;

    ssl_trusted_certificate /etc/letsencrypt/live/example.tld/chain.pem;
    ssl_certificate /etc/letsencrypt/live/example.tld/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.tld/privkey.pem;

    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4";
    ssl_prefer_server_ciphers off;
    # On an old server with OpenSSL 1.0.2 or below, leave only prime256v1
    # here (X25519 requires OpenSSL 1.1.0+) or comment out the following line.
    ssl_ecdh_curve X25519:prime256v1:secp384r1:secp521r1;
    ssl_stapling on;
    ssl_stapling_verify on;
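    # Note: for OCSP stapling to work, nginx may also need a 'resolver'
    # directive (usually set in nginx.conf) to reach the OCSP responder.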

    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;
    gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript application/activity+json application/atom+xml;
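    # These gzip_* options only take effect when gzip is enabled; most
    # distribution nginx.conf files already set 'gzip on;' at the http
    # level, so add it here if yours does not.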

    # the nginx default is 1m, not enough for large media uploads
    client_max_body_size 16m;
    ignore_invalid_headers off;

    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $http_host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
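    # The Upgrade/Connection headers above allow WebSocket connections
    # (used by the streaming API) to pass through to the backend.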

    location / {
        proxy_pass http://phoenix;
    }

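    # Media and media-proxy responses are cached on disk. The slice module
    # below splits requests into 1 MiB pieces that are cached independently,
    # so ranges of large files can be served from cache; proxy_cache_lock
    # ensures only one request at a time populates a given slice.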
    location ~ ^/(media|proxy) {
        proxy_cache pleroma_media_cache;
        slice 1m;
        proxy_cache_key $host$uri$is_args$args$slice_range;
        proxy_set_header Range $slice_range;
        proxy_cache_valid 200 206 301 304 1h;
        proxy_cache_lock on;
        proxy_ignore_client_abort on;
        proxy_buffering on;
        chunked_transfer_encoding on;
        proxy_pass http://phoenix;
    }
}