add takahe as submodule
parent ef58e00d2b · commit 8638479240
22 changed files with 447 additions and 91 deletions
.dockerignore (new file, +10)
@@ -0,0 +1,10 @@
.DS_Store
.env
.venv
.vscode
.github
.git
__pycache__
/doc
/media
/static
.gitmodules (new file, vendored, +4)
@@ -0,0 +1,4 @@
[submodule "neodb-takahe"]
	path = neodb-takahe
	url = git@github.com:alphatownsman/neodb-takahe.git
	branch = neodb
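With takahe now pulled in as a git submodule, a fresh checkout also needs the submodule initialized, otherwise the `neodb-takahe` directory stays empty. A minimal sketch (the main repository URL is a placeholder):
```
# clone with submodules in one go...
git clone --recurse-submodules <main-repo-url> neodb && cd neodb
# ...or initialize them in an existing checkout
git submodule update --init
```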
@@ -30,7 +30,6 @@ repos:
    rev: 22.12.0
    hooks:
      - id: black
        language_version: python3.11

  - repo: https://github.com/Riverside-Healthcare/djLint
    rev: v1.32.1
Dockerfile (25 lines changed)
@@ -1,10 +1,14 @@
# syntax=docker/dockerfile:1
FROM python:3.11-slim-bullseye
FROM python:3.11-slim
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
RUN useradd -U app
COPY . /neodb
RUN mkdir -p /www
WORKDIR /neodb
RUN apt-get update \
RUN mv neodb-takahe /takahe
RUN cp misc/neodb-manage misc/takahe-manage /bin
RUN --mount=type=cache,target=/var/cache/apt apt-get update \
    && apt-get install -y --no-install-recommends \
    build-essential \
    libpq-dev \
@@ -13,10 +17,13 @@ RUN apt-get update \
    nginx \
    opencc \
    git
RUN busybox --install
COPY misc/nginx.conf.d/* /etc/nginx/conf.d/
RUN echo >> /etc/nginx/nginx.conf
RUN echo 'daemon off;' >> /etc/nginx/nginx.conf
RUN python3 -m pip install --no-cache-dir --upgrade -r requirements.txt
RUN --mount=type=cache,target=/root/.cache python3 -m pip install --upgrade -r requirements.txt
RUN --mount=type=cache,target=/root/.cache cd /takahe && python3 -m pip install --upgrade -r requirements.txt
RUN apt-get purge -y --auto-remove \
    build-essential \
    libpq-dev \
@@ -24,8 +31,10 @@ RUN apt-get purge -y --auto-remove \
RUN python3 manage.py compilescss \
    && python3 manage.py collectstatic --noinput
RUN cp -R misc/www /www
RUN mv static /www/s
RUN cd /takahe && TAKAHE_DATABASE_SERVER="postgres://x@y/z" TAKAHE_SECRET_KEY="t" TAKAHE_MAIN_DOMAIN="x.y" python3 manage.py collectstatic --noinput
USER app:app
# invoke check by default
CMD [ "python3", "/neodb/manage.py", "check" ]
CMD [ "sh", "-c", 'python3 /neodb/manage.py check && TAKAHE_DATABASE_SERVER="postgres://x@y/z" TAKAHE_SECRET_KEY="t" TAKAHE_MAIN_DOMAIN="x.y" python3 manage.py collectstatic --noinput python3 /takahe/manage.py check' ]
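The new `RUN --mount=type=cache,...` instructions (together with the `# syntax=docker/dockerfile:1` header) rely on BuildKit, so a local build needs it enabled. A sketch of building the image by hand; the tag is an assumption, match it to the `image:` value in your compose file:
```
# BuildKit is required for the cache mounts used in the Dockerfile above
DOCKER_BUILDKIT=1 docker build -t neodb:latest .
```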
@@ -56,7 +56,6 @@ INSTALLED_APPS = [
    "polymorphic",
    "easy_thumbnails",
    "user_messages",
    "fontawesomefree",
    # "anymail",
    # "silk",
]
@@ -130,9 +129,9 @@ CACHES = {
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": os.environ.get("NEODB_DB_NAME", "test"),
        "USER": os.environ.get("NEODB_DB_USER", "postgres"),
        "PASSWORD": os.environ.get("NEODB_DB_PASSWORD", "admin123"),
        "NAME": os.environ.get("NEODB_DB_NAME", "test_neodb"),
        "USER": os.environ.get("NEODB_DB_USER", "testuser"),
        "PASSWORD": os.environ.get("NEODB_DB_PASSWORD", "testpass"),
        "HOST": os.environ.get("NEODB_DB_HOST", "127.0.0.1"),
        "PORT": int(os.environ.get("NEODB_DB_PORT", 5432)),
        "OPTIONS": {
@@ -191,7 +190,6 @@ SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
DATA_UPLOAD_MAX_MEMORY_SIZE = 100 * 1024 * 1024
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True

if os.getenv("NEODB_SSL", "") != "":
    SECURE_SSL_REDIRECT = True
    SECURE_HSTS_PRELOAD = True
@@ -238,6 +236,10 @@ REDIRECT_URIS = SITE_INFO["site_url"] + "/account/login/oauth"
# for sites migrated from previous version, either wipe mastodon client ids or use:
# REDIRECT_URIS = f'{SITE_INFO["site_url"]}/users/OAuth2_login/'

CSRF_TRUSTED_ORIGINS = [SITE_INFO["site_url"]]
if DEBUG:
    CSRF_TRUSTED_ORIGINS += ["http://127.0.0.1:8000", "http://localhost:8000"]

# Path to save report related images, ends with slash
REPORT_MEDIA_PATH_ROOT = "report/"
MARKDOWNX_MEDIA_PATH = "review/"
@@ -19,7 +19,6 @@ from journal.models import (
    ShelfMember,
    ShelfType,
    ShelfTypeNames,
    q_item_in_category,
    q_piece_in_home_feed_of_user,
    q_piece_visible_to_user,
)
@@ -259,12 +258,6 @@ def reviews(request, item_path, item_uuid):
def discover(request):
    if request.method != "GET":
        raise BadRequest()
    user = request.user
    if user.is_authenticated:
        layout = user.preference.discover_layout
    else:
        layout = []

    cache_key = "public_gallery"
    gallery_list = cache.get(cache_key, [])
@@ -276,10 +269,12 @@ def discover(request):
    # )
    # gallery["items"] = Item.objects.filter(id__in=ids)

    if user.is_authenticated:
    if request.user.is_authenticated:
        layout = request.user.preference.discover_layout
        identity = request.user.identity
        podcast_ids = [
            p.item_id
            for p in user.shelf_manager.get_latest_members(
            for p in identity.shelf_manager.get_latest_members(
                ShelfType.PROGRESS, ItemCategory.Podcast
            )
        ]
@@ -289,7 +284,7 @@ def discover(request):
        books_in_progress = Edition.objects.filter(
            id__in=[
                p.item_id
                for p in user.shelf_manager.get_latest_members(
                for p in identity.shelf_manager.get_latest_members(
                    ShelfType.PROGRESS, ItemCategory.Book
                )[:10]
            ]
@@ -297,22 +292,23 @@ def discover(request):
        tvshows_in_progress = Item.objects.filter(
            id__in=[
                p.item_id
                for p in user.shelf_manager.get_latest_members(
                for p in identity.shelf_manager.get_latest_members(
                    ShelfType.PROGRESS, ItemCategory.TV
                )[:10]
            ]
        )
    else:
        identity = None
        recent_podcast_episodes = []
        books_in_progress = []
        tvshows_in_progress = []
        layout = []

    return render(
        request,
        "discover.html",
        {
            "user": user,
            "identity": user.identity,
            "identity": identity,
            "gallery_list": gallery_list,
            "recent_podcast_episodes": recent_podcast_episodes,
            "books_in_progress": books_in_progress,
doc/install-docker.md (new file, +104)
@@ -0,0 +1,104 @@
Run NeoDB in Docker
===================

## Overview
For small and medium NeoDB instances, it's recommended to deploy as a local container cluster with `docker-compose`.

```mermaid
flowchart TB
    web[[Your reverse proxy server with SSL]] --- neodb-nginx[nginx listening on localhost:8000]
    subgraph Containers managed by docker-compose
    neodb-nginx --- neodb-web
    neodb-nginx --- takahe-web
    neodb-worker --- typesense[(typesense)]
    neodb-worker --- neodb-db[(neodb-db)]
    neodb-worker --- redis[(redis)]
    neodb-web --- typesense
    neodb-web --- neodb-db
    neodb-web --- redis
    neodb-web --- takahe-db[(takahe-db)]
    migration([migration]) --- neodb-db
    migration --- takahe-db
    takahe-web --- takahe-db
    takahe-web --- redis
    takahe-stator --- takahe-db
    takahe-stator --- redis
    end
```

As shown in the diagram, a reverse proxy server (e.g. nginx, or a Cloudflare tunnel) is required; it should have SSL configured and point to `http://localhost:8000`. The rest is handled by `docker-compose` and the containers.

## Install Docker and add user to docker group
Create a user (e.g. `neouser`) to run NeoDB, and execute these commands as *root*:
```
# apt install docker.io docker-compose
# adduser --ingroup docker neouser
```

## Get configuration files
- create a folder for configuration, e.g. `~/neodb/config`
- grab `docker-compose.yml` and `neodb.env.example` from the source code
- rename `neodb.env.example` to `.env`

## Set up .env file
Change essential options like `NEODB_SITE_DOMAIN` in `.env` before starting the cluster for the first time. Changing them later may have unintended consequences, so please make sure they are correct before exposing the service externally.

- `NEODB_SITE_NAME` - name of your site
- `NEODB_SITE_DOMAIN` - domain name of your site
- `NEODB_SECRET_KEY` - secret key used to sign session data
- `NEODB_DATA` - path to store db/media/cache; it's `../data` by default, but can be any writable path

See `configuration.md` for more details.

## Start docker
In the folder with `docker-compose.yml` and `.env`, execute these as the user you just created:
```
$ docker-compose pull
$ docker-compose up -d
```

In a minute or so, the site should be up at 127.0.0.1:8000; you may check it with:
```
$ curl http://localhost:8000/nodeinfo/2.0/
```

A JSON response will be returned if the server is up and running:
```
{"version": "2.0", "software": {"name": "neodb", "version": "0.8-dev"}, "protocols": ["activitypub", "neodb"], "services": {"outbound": [], "inbound": []}, "usage": {"users": {"total": 1}, "localPosts": 0}, "openRegistrations": true, "metadata": {}}
```

## Make the site available publicly

The next step is to expose `127.0.0.1:8000` to the external network as `https://yourdomain.tld`. There are many ways to do this: you may use nginx as a reverse proxy with an SSL certificate, or configure a CDN provider to handle SSL. There are no detailed instructions yet, but contributions are welcome.

NeoDB requires `https` by default. Although `http` may be technically possible, it's tedious to set up and not secure, hence not recommended.

## Update NeoDB

Check the release notes, update `docker-compose.yml` and `.env` as instructed, then pull the image:
```
docker-compose pull
```

If there's no change in `docker-compose.yml`, restart only the NeoDB services:
```
$ docker-compose stop neodb-web neodb-worker neodb-worker-extra takahe-web takahe-stator nginx
$ docker-compose up -d
```

Otherwise restart the entire cluster:
```
$ docker-compose down
$ docker-compose up -d
```

## Troubleshooting

- `docker-compose ps` to see if any service is down (it's normal for `migration` to be in the `Exit 0` state)
- `docker-compose run shell` to open a shell inside the cluster, or `docker-compose run root` for a root shell; `apt` is available if extra packages are needed

## Scaling

If you are running a high-traffic instance, increase `NEODB_WEB_WORKER_NUM`, `TAKAHE_WEB_WORKER_NUM`, `TAKAHE_STATOR_CONCURRENCY` and `TAKAHE_STATOR_CONCURRENCY_PER_MODEL` as far as your host server can handle.

Further scaling out with multiple nodes (e.g. via Kubernetes) is beyond the scope of this document, but consider running db/redis/typesense separately and then duplicating the web/worker/stator containers, as long as connections and mounts are configured properly; `migration` runs only once on start or upgrade, and it should be kept that way.
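The "Make the site available publicly" section above leaves the reverse proxy to the reader. For reference, a minimal sketch of an nginx vhost that forwards `https://yourdomain.tld` to the cluster on `127.0.0.1:8000`; the domain and certificate paths are assumptions, adjust them to your setup:
```
# hypothetical sketch: write a minimal vhost, test it and reload nginx
cat > /etc/nginx/sites-available/neodb.conf <<'EOF'
server {
    listen 443 ssl;
    server_name yourdomain.tld;
    ssl_certificate     /etc/letsencrypt/live/yourdomain.tld/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/yourdomain.tld/privkey.pem;
    client_max_body_size 100M;
    location / {
        proxy_pass http://127.0.0.1:8000;
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-Proto https;
    }
}
EOF
ln -s /etc/nginx/sites-available/neodb.conf /etc/nginx/sites-enabled/
nginx -t && systemctl reload nginx
```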
@@ -20,11 +20,7 @@ This is a very basic guide with limited detail, contributions welcomed
0 Run in Docker
---------------

```
cp neodb.env.dist neodb.env # update this configuration

docker-compose up
```
Recommended, see [Docker Installation](install-docker.md)

1 Manual Install
----------------
@@ -1,49 +1,78 @@
version: "3.4"
version: "3.8"

# NEODB Docker Compose File
#
# Note: configuration here may not be secure for production usage
# Note: may not be secure for production usage, use at your own risk
#
# The following env variable are expected from .env or command line
# - NEODB_SECRET_KEY
# - NEODB_SITE_DOMAIN
# - NEODB_SITE_NAME
# - NEODB_DATA

x-shared:
  neodb-service: &neodb-service
    build: .
    image: neodb:latest
    env_file:
      - neodb.env
    image: nerodb/neodb:latest
    environment:
      - NEODB_DB_NAME=neodb
      - NEODB_DB_USER=neodb
      - NEODB_DB_PASSWORD=aubergine
      - NEODB_DB_HOST=neodb-db
      - NEODB_DB_PORT=5432
      - NEODB_REDIS_HOST=neodb-redis
      - NEODB_REDIS_HOST=redis
      - NEODB_REDIS_PORT=6379
      - NEODB_REDIS_DB=0
      - NEODB_TYPESENSE_ENABLE=1
      - NEODB_TYPESENSE_HOST=neodb-search
      - NEODB_TYPESENSE_HOST=typesense
      - NEODB_TYPESENSE_PORT=8108
      - NEODB_TYPESENSE_KEY=eggplant
      - NEODB_STATIC_ROOT=/www/static/
      - NEODB_MEDIA_ROOT=/www/media/
      - NEODB_FROM_EMAIL=no-reply@${NEODB_SITE_DOMAIN}
      - NEODB_MEDIA_ROOT=/www/m/
      - TAKAHE_DB_NAME=takahe
      - TAKAHE_DB_USER=takahe
      - TAKAHE_DB_PASSWORD=aubergine
      - TAKAHE_DB_HOST=takahe-db
      - TAKAHE_DB_PORT=5432
      - TAKAHE_SECRET_KEY=${NEODB_SECRET_KEY}
      - TAKAHE_MAIN_DOMAIN=${NEODB_SITE_DOMAIN}
      - TAKAHE_MEDIA_URL=https://${NEODB_SITE_DOMAIN}/media/
      - TAKAHE_EMAIL_FROM=no-reply@${NEODB_SITE_DOMAIN}
      - TAKAHE_DATABASE_SERVER=postgres://takahe:aubergine@takahe-db/takahe
      - TAKAHE_CACHES_DEFAULT=redis://redis:6379/0
      - TAKAHE_MEDIA_BACKEND=local://www/media/
      - TAKAHE_MEDIA_ROOT=/www/media/
      - TAKAHE_USE_PROXY_HEADERS=true
      - TAKAHE_STATOR_CONCURRENCY=4
      - TAKAHE_STATOR_CONCURRENCY_PER_MODEL=2
      - TAKAHE_DEBUG=${NEODB_DEBUG:-False}
    restart: "on-failure"
    volumes:
      - ${NEODB_DATA:-../data}/neodb-media:/www/media
      - ${NEODB_DATA:-../data}/neodb-media:/www/m
      - ${NEODB_DATA:-../data}/takahe-media:/www/media
      - ${NEODB_DATA:-../data}/takahe-cache:/www/cache
      - ${NEODB_DATA:-../data}/www-root:/www/root
      # - ${NEODB_DATA:-../data}/log:/var/log/nginx
    depends_on:
      - neodb-redis
      - redis
      - neodb-db
      - neodb-search
      - typesense
      - takahe-db

services:
  neodb-redis:
  redis:
    image: redis:alpine
    ports:
      - "16379:6379"
    # ports:
    #   - "16379:6379"
    command: redis-server --save 60 1 --loglevel warning
    volumes:
      - ${NEODB_DATA:-../data}/redis:/data

  neodb-search:
  typesense:
    image: typesense/typesense:0.25.0
    restart: "on-failure"
    healthcheck:
      test: ['CMD', 'curl', '-vf', 'http://127.0.0.1:8108/health']
    # healthcheck:
    #   test: ['CMD', 'curl', '-vf', 'http://127.0.0.1:8108/health']
    # ports:
    #   - "18108:8108"
    environment:
@@ -57,7 +86,7 @@ services:
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'neodb']
    volumes:
      - ${NEODB_DATA:-../data}/neodb-data:/var/lib/postgresql/data
      - ${NEODB_DATA:-../data}/neodb-db:/var/lib/postgresql/data
    # ports:
    #   - "15432:5432"
    environment:
@@ -65,23 +94,40 @@ services:
      - POSTGRES_USER=neodb
      - POSTGRES_PASSWORD=aubergine

  takahe-db:
    image: postgres:14-alpine
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'takahe']
    volumes:
      - ${NEODB_DATA:-../data}/takahe-db:/var/lib/postgresql/data
    # ports:
    #   - "16432:5432"
    environment:
      - POSTGRES_DB=takahe
      - POSTGRES_USER=takahe
      - POSTGRES_PASSWORD=aubergine

  migration:
    <<: *neodb-service
    restart: "no"
    command: python /neodb/manage.py migrate
    command: "sh -c 'python /takahe/manage.py migrate && python /neodb/manage.py migrate'"
    depends_on:
      neodb-db:
        condition: service_healthy
      neodb-search:
      typesense:
        condition: service_started
      neodb-redis:
      redis:
        condition: service_started
      takahe-db:
        condition: service_healthy

  neodb-web:
    <<: *neodb-service
    # ports:
    #   - "18000:8000"
    command: gunicorn boofilsic.wsgi -w 8 --preload -b 0.0.0.0:8000
    command: gunicorn boofilsic.wsgi -w ${NEODB_WEB_WORKER_NUM:-8} --preload -b 0.0.0.0:8000
    healthcheck:
      test: ['CMD', 'wget', '-qO/tmp/test', 'http://127.0.0.1:8000/discover/']
    depends_on:
      migration:
        condition: service_completed_successfully
@@ -93,18 +139,50 @@ services:
      migration:
        condition: service_completed_successfully

  neodb-worker-secondary:
  neodb-worker-extra:
    <<: *neodb-service
    command: python /neodb/manage.py rqworker --with-scheduler fetch crawl
    depends_on:
      migration:
        condition: service_completed_successfully

  neodb-nginx:
  takahe-web:
    <<: *neodb-service
    command: nginx
    # ports:
    #   - "19000:8000"
    command: gunicorn --chdir /takahe takahe.wsgi -w ${TAKAHE_WEB_WORKER_NUM:-8} --preload -b 0.0.0.0:8000
    healthcheck:
      test: ['CMD', 'wget', '-qO/tmp/test', 'http://127.0.0.1:8000/nodeinfo/2.0/']
    depends_on:
      migration:
        condition: service_completed_successfully

  takahe-stator:
    <<: *neodb-service
    command: python /takahe/manage.py runstator
    depends_on:
      migration:
        condition: service_completed_successfully

  nginx:
    <<: *neodb-service
    user: "root:root"
    command: nginx -g 'daemon off;'
    depends_on:
      takahe-web:
        condition: service_started
      neodb-web:
        condition: service_started
    ports:
      - "${NEODB_PORT:-8000}:8000"

  shell:
    <<: *neodb-service
    command: bash
    profiles: ["tools"]

  root:
    <<: *neodb-service
    command: bash
    profiles: ["tools"]
    user: "root:root"
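Almost every service above reuses the `&neodb-service` anchor, and the `${...}` values are interpolated from `.env`, so it can be worth rendering the resolved configuration before the first `up` to confirm the merged service definitions look sane:
```
# print the fully resolved compose file (anchor expanded, .env applied)
docker-compose config

# validate only, exit non-zero if the file is invalid
docker-compose config -q
```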
misc/neodb-manage (new executable file, +2)
@@ -0,0 +1,2 @@
#!/bin/sh
python /neodb/manage.py $@
@@ -1,22 +1,107 @@
proxy_cache_path /www/cache levels=1:2 keys_zone=takahe:20m inactive=14d max_size=1g;

upstream neodb {
    server neodb-web:8000;
}

upstream takahe {
    server takahe-web:8000;
}

server {
    server_name neodb.social;
    listen 8000;
    location = /favicon.ico {
        root /www;
        access_log off; log_not_found off;

    charset utf-8;
    ignore_invalid_headers on;
    client_max_body_size 100M;
    client_body_buffer_size 128k;
    proxy_connect_timeout 900;
    proxy_set_header Host $http_host;
    proxy_set_header X-Forwarded-Proto https;
    proxy_http_version 1.1;
    proxy_hide_header X-Takahe-User;
    proxy_hide_header X-Takahe-Identity;

    # allow admin to serv their own robots.txt/favicon.ico/...
    location ~ ^/\w+\.\w+$ {
        root /www/root;
        access_log off;
        log_not_found off;
    }
    location /static/ {
        alias /takahe/static-collected/;
        add_header Cache-Control "public, max-age=604800, immutable";
    }
    location /s/ {
        alias /neodb/static/;
        add_header Cache-Control "public, max-age=604800, immutable";
    }
    location /m/ {
        alias /www/m/;
        add_header Cache-Control "public, max-age=604800, immutable";
    }
    # Proxies media and remote media with caching
    location ~* ^/(media|proxy) {
        # Cache media and proxied resources
        proxy_cache takahe;
        proxy_cache_key $host$uri;
        proxy_cache_valid 200 304 4h;
        proxy_cache_valid 301 307 4h;
        proxy_cache_valid 500 502 503 504 0s;
        proxy_cache_valid any 1h;
        add_header X-Cache $upstream_cache_status;

        # Signal to Takahē that we support full URI accel proxying
        proxy_set_header X-Takahe-Accel true;
        proxy_pass http://takahe;
    }
    # Internal target for X-Accel redirects that stashes the URI in a var
    location /__takahe_accel__/ {
        internal;
        set $takahe_realuri $upstream_http_x_takahe_realuri;
        rewrite ^/(.+) /__takahe_accel__/real/;
    }
    # Real internal-only target for X-Accel redirects
    location /__takahe_accel__/real/ {
        # Only allow internal redirects
        internal;

        # # Reconstruct the remote URL
        resolver 9.9.9.9 8.8.8.8 valid=300s;

        # Unset Authorization and Cookie for security reasons.
        proxy_set_header Authorization '';
        proxy_set_header Cookie '';
        proxy_set_header User-Agent 'takahe/nginx';
        proxy_set_header Host $proxy_host;
        proxy_set_header X-Forwarded-For '';
        proxy_set_header X-Forwarded-Host '';
        proxy_set_header X-Forwarded-Server '';
        proxy_set_header X-Real-Ip '';

        # Stops the local disk from being written to (just forwards data through)
        proxy_max_temp_file_size 0;

        # Proxy the remote file through to the client
        proxy_pass $takahe_realuri;
        proxy_ssl_server_name on;
        add_header X-Takahe-Accel "HIT";

        # Cache these responses too
        proxy_cache takahe;
        # Cache after a single request
        proxy_cache_min_uses 1;
        proxy_cache_key $takahe_realuri;
        proxy_cache_valid 200 304 720h;
        proxy_cache_valid 301 307 12h;
        proxy_cache_valid 500 502 503 504 0s;
        proxy_cache_valid any 72h;
        add_header X-Cache $upstream_cache_status;
    }
    location ~* ^/(@|\.well-known|actor|inbox|nodeinfo|api/v1|api/v2|auth|oauth|tags|settings|media|proxy|admin|djadmin) {
        proxy_pass http://takahe;
    }
    location / {
        client_max_body_size 100M;
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-Proto https;
        proxy_pass http://neodb-web:8000;
    }

    location /s/ {
        root /www;
    }

    location /m/ {
        root /www;
        proxy_pass http://neodb;
    }
}
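The `add_header X-Cache $upstream_cache_status;` lines above make it easy to verify that the media/proxy cache is actually being used. A quick check from any machine; the URL is a placeholder, use a real `/media/` or `/proxy/` URL from your instance:
```
# the first request typically reports MISS, a repeat of the same URL HIT
curl -sI "https://yourdomain.tld/media/example.jpg" | grep -i '^x-cache'
```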
misc/takahe-manage (new executable file, +2)
@@ -0,0 +1,2 @@
#!/bin/sh
python /takahe/manage.py $@
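Both wrappers are copied to `/bin` by the Dockerfile above, so Django management commands for either app can be invoked from any container built from this image. A hypothetical one-off invocation through the compose `shell` service defined earlier (the sub-commands are just examples):
```
docker-compose run --rm shell neodb-manage check
docker-compose run --rm shell takahe-manage check
```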
neodb-takahe (new submodule)
@@ -0,0 +1 @@
Subproject commit 4bf7dd6b6e6594fdfe2df4e9b3b5383d5aea7063
@@ -1,6 +0,0 @@
NEODB_SECRET_KEY=change_me
NEODB_SITE_NAME=Example Site
NEODB_SITE_DOMAIN=example.site
#NEODB_PORT=8000
#NEODB_SSL=1
#NEODB_DATA=/var/lib/neodb
neodb.env.example (new file, +23)
@@ -0,0 +1,23 @@
# NEODB Configuration

# copy along with docker-compose.yml, rename it to .env

# Change these before start the instance for the first time
NEODB_SECRET_KEY=change_me
NEODB_SITE_NAME=Example Site
NEODB_SITE_DOMAIN=example.site

# HTTP port your reverse proxy should set request to
# NEODB_PORT=8000

# Path to store db/media/cache/etc, must be writable
# NEODB_DATA=/var/lib/neodb

# Scaling parameters
# NEODB_WEB_WORKER_NUM=32
# TAKAHE_WEB_WORKER_NUM=32
# TAKAHE_STATOR_CONCURRENCY=10
# TAKAHE_STATOR_CONCURRENCY_PER_MODEL=10

# Turn on DEBUG mode, either set this to True or don't set it at all
# NEODB_DEBUG=True
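`NEODB_SECRET_KEY` should be a long random string. One way to generate such a value, using nothing but the Python standard library:
```
python3 -c 'import secrets; print(secrets.token_urlsafe(50))'
```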
@@ -1,5 +1,5 @@
[tool.pyright]
exclude = [ "media", ".venv", ".git", "playground", "catalog/*/tests.py", "neodb", "**/migrations", "**/sites/douban_*" ]
exclude = [ "media", ".venv", ".git", "playground", "catalog/*/tests.py", "neodb", "**/migrations", "**/sites/douban_*", "neodb-takahe" ]

[tool.djlint]
ignore="T002,T003,H006,H019,H020,H021,H023,H030,H031"
@@ -23,7 +23,6 @@ django-user-messages
dnspython
easy-thumbnails
filetype
fontawesomefree
gunicorn
httpx
igdb-api-v4
@@ -506,4 +506,55 @@ class Migration(migrations.Migration):
                "db_table": "users_inboxmessage",
            },
        ),
        migrations.CreateModel(
            name="Config",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("key", models.CharField(max_length=500)),
                ("json", models.JSONField(blank=True, null=True)),
                ("image", models.ImageField(blank=True, null=True, upload_to="")),
                (
                    "domain",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="configs",
                        to="takahe.domain",
                    ),
                ),
                (
                    "identity",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="configs",
                        to="takahe.identity",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="configs",
                        to="takahe.user",
                    ),
                ),
            ],
            options={
                "db_table": "core_config",
                "unique_together": {("key", "user", "identity", "domain")},
            },
        ),
    ]
@@ -342,9 +342,9 @@ class Takahe:
        content: str,
        visibility: Visibilities,
        data: dict | None = None,
        reply_to_pk: int | None = None,
        post_pk: int | None = None,
        post_time: datetime.datetime | None = None,
        reply_to_pk: int | None = None,
    ) -> int | None:
        identity = Identity.objects.get(pk=author_pk)
        post = (
@@ -403,7 +403,7 @@ class Takahe:
            if user.preference.mastodon_append_tag
            else ""
        )
        stars = _rating_to_emoji(mark.rating_grade, 0)
        stars = _rating_to_emoji(mark.rating_grade, 1)
        item_link = f"{settings.SITE_INFO['site_url']}/~neodb~{mark.item.url}"

        pre_conetent = (
@@ -518,7 +518,9 @@ class Takahe:
        return post.stats or {}

    @staticmethod
    def get_post_replies(post_pk: int, identity_pk: int | None):
    def get_post_replies(post_pk: int | None, identity_pk: int | None):
        if not post_pk:
            return Post.objects.none()
        node = Post.objects.filter(pk=post_pk).first()
        if not node:
            return Post.objects.none()
@@ -137,9 +137,6 @@ class Migration(migrations.Migration):
                ),
            ),
        ],
        managers=[
            ("objects", django.contrib.auth.models.UserManager()),
        ],
    ),
    migrations.CreateModel(
        name="Preference",
@@ -47,7 +47,7 @@ def init_domain(apps, schema_editor):
        key="site_name",
        user=None,
        identity=None,
        domain=domain,
        domain_id=domain,
        defaults={"json": name},
    )

@@ -76,8 +76,10 @@ class APIdentity(models.Model):

    @property
    def avatar(self):
        # return self.takahe_identity.icon_uri or static("img/avatar.svg") # fixme
        return f"/proxy/identity_icon/{self.pk}/"
        if self.local:
            return self.takahe_identity.icon_uri or static("img/avatar.svg")
        else:
            return f"/proxy/identity_icon/{self.pk}/"

    @property
    def url(self):