Compare commits


No commits in common. "archive_01" and "master" have entirely different histories.

32 changed files with 288 additions and 742 deletions

.gitignore

@@ -7,21 +7,29 @@
 # Ignore everything inside Containers/*
 /Containers/*/*
-!/Containers/*/compose.yml*
-# Some (safe) configs files
+# Except compose.yaml and tailscale serve configs
+!/Containers/*/compose.yaml
+!/Containers/*/serveconfig/
+# navidrome toml for auto-clean
 !/Containers/Navidrome/data/
 /Containers/Navidrome/data/*
 !/Containers/Navidrome/data/navidrome.toml
+!/Containers/HomeAssistant/config/
+/Containers/HomeAssistant/config/*
+!/Containers/HomeAssistant/config/configuration.yaml
 # Scripts
 !/Scripts/*
-# VMS
+# VMS (not used for the moment)
 !/VMs/*
 # Keep utility files in root
 !.gitignore
 !start_all.sh
 !stop_all.sh
+!stop_very_all.sh
 !README.md
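The whitelist pattern above (ignore everything under `Containers/*/*`, then re-include a few paths) is easy to get subtly wrong. A quick sanity check with `git check-ignore` — a sketch only, the paths are illustrative:

```bash
# -v prints the matching rule; no output (exit 1) means the path is not ignored.
git check-ignore -v Containers/Forgejo/forgejo/app.ini   # expect a /Containers/*/* match
git check-ignore -v Containers/Forgejo/compose.yaml      # expect no output (whitelisted)
git check-ignore -v Containers/Forgejo/serveconfig/serve-config.json   # expect no output
```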

@@ -1,16 +0,0 @@
services:
audiobookshelf:
restart: unless-stopped
image: ghcr.io/advplyr/audiobookshelf:latest
ports:
- 13378:80
volumes:
- /home/criz/Medias/Audiobooks:/audiobooks
- /home/criz/Medias/Podcasts:/podcasts
- ./config:/config
- ./metadata:/metadata
environment:
- TZ=Europe/Madrid
labels:
tsdproxy.enable: "true"
tsdproxy.name: "audio"

@@ -1,10 +0,0 @@
services:
bentopdf:
image: bentopdf/bentopdf-simple:latest
container_name: bentopdf
restart: unless-stopped
ports:
- '8934:8080'
labels:
tsdproxy.enable: "true"
tsdproxy.name: "pdf"

@@ -1,28 +0,0 @@
services:
beszel:
image: henrygd/beszel
container_name: beszel
restart: unless-stopped
ports:
- 8090:8090
volumes:
- ./beszel_data:/beszel_data
labels:
tsdproxy.enable: "true"
tsdproxy.name: "dash"
beszel-agent:
image: henrygd/beszel-agent
container_name: beszel-agent
restart: unless-stopped
network_mode: host
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- ./beszel_agent_data:/var/lib/beszel-agent
# monitor other disks / partitions by mounting a folder in /extra-filesystems
- /mnt/disk/.beszel:/extra-filesystems/nvme0n1p2:ro
environment:
LISTEN: 45876
KEY: ${KEY}
TOKEN: ${TOKEN}
HUB_URL: ${HUB_URL}

@@ -1,11 +1,27 @@
-networks:
-  forgejo:
-    external: false
 services:
+  forgejo-ts:
+    image: tailscale/tailscale
+    container_name: forgejo-ts
+    cap_add:
+      - net_admin
+    devices:
+      - /dev/net/tun:/dev/net/tun
+    volumes:
+      - ./tailscale/state:/var/lib/tailscale
+      - ./serveconfig:/config
+    environment:
+      - TS_AUTHKEY=${TS_AUTHKEY}
+      - TS_EXTRA_ARGS=--advertise-tags=tag:forgejo --reset
+      - TS_STATE_DIR=/var/lib/tailscale
+      - TS_SERVE_CONFIG=/config/serve-config.json
+      - TS_USERSPACE=false
+    hostname: git
+    restart: unless-stopped
   forgejo:
     image: codeberg.org/forgejo/forgejo:11
     container_name: forgejo
+    network_mode: service:forgejo-ts
     environment:
       - USER_UID=1000
       - USER_GID=1000
@@ -13,31 +29,19 @@ services:
       - FORGEJO__database__HOST=forgejo_db:5432
       - FORGEJO__database__NAME=forgejo
       - FORGEJO__database__USER=forgejo
-      - FORGEJO__database__SSL_MODE=disable
       - FORGEJO__database__PASSWD=${POSTGRES_PASSWORD}
+      - FORGEJO__database__SSL_MODE=disable
       - FORGEJO__database__SCHEMA=public
       - FORGEJO__database__CHARSET=utf8
-    restart: unless-stopped
-    networks:
-      - forgejo
     volumes:
       - ./forgejo:/data
-      - /etc/timezone:/etc/timezone:ro
+      # - /etc/timezone:/etc/timezone:ro
       - /etc/localtime:/etc/localtime:ro
-    ports:
-      - '3000:3000'
-    labels:
-      tsdproxy.enable: "true"
-      tsdproxy.name: "git"
-      tsdproxy.container_port: 3000
-      tsdproxy.funnel: "true"
+    restart: unless-stopped
   forgejo_db:
     image: postgres:16
     container_name: forgejo-postgres
-    restart: always
-    networks:
-      - forgejo
     environment:
       - POSTGRES_DB=forgejo
       - POSTGRES_USER=forgejo
@@ -45,3 +49,4 @@ services:
       - POSTGRES_HOST_AUTH_METHOD=md5
     volumes:
       - ./postgres:/var/lib/postgresql/data
+    restart: always

@@ -0,0 +1,19 @@
{
"TCP": {
"443": {
"HTTPS": true
}
},
"Web": {
"${TS_CERT_DOMAIN}:443": {
"Handlers": {
"/": {
"Proxy": "http://127.0.0.1:3000"
}
}
}
},
"AllowFunnel": {
"${TS_CERT_DOMAIN}:443": true
}
}

@@ -1,17 +0,0 @@
services:
gotify:
container_name: gotify
restart: always
image: gotify/server
ports:
- 8084:80
environment:
GOTIFY_DEFAULTUSER_PASS: 'admin'
volumes:
- './gotify_data:/app/data'
labels:
tsdproxy.enable: "true"
tsdproxy.name: "gotify"
# to run gotify as a dedicated user:
# sudo chown -R 1234:1234 ./gotify_data
# user: "1234:1234"

@@ -0,0 +1,34 @@
services:
home-assistant-ts:
image: tailscale/tailscale
container_name: home-assistant-ts
cap_add:
- net_admin
devices:
- /dev/net/tun:/dev/net/tun
volumes:
- ./tailscale/state:/var/lib/tailscale
- ./serveconfig:/config
environment:
- TS_AUTHKEY=${TS_AUTHKEY}
- TS_EXTRA_ARGS=--advertise-tags=tag:home-assistant --reset
- TS_STATE_DIR=/var/lib/tailscale
- TS_SERVE_CONFIG=/config/serve-config.json
- TS_USERSPACE=false
hostname: home
restart: unless-stopped
home-assistant:
container_name: home-assistant
image: "ghcr.io/home-assistant/home-assistant:stable"
volumes:
- ./config:/config
- /etc/localtime:/etc/localtime:ro
- /run/dbus:/run/dbus:ro
devices:
- /dev/ttyUSB0:/dev/ttyUSB0
restart: unless-stopped
network_mode: service:home-assistant-ts
environment:
TZ: Europe/Paris # "Europe/France" is not a valid tz database name

@@ -0,0 +1,18 @@
# Loads default set of integrations. Do not remove.
default_config:
# Load frontend themes from the themes folder
frontend:
themes: !include_dir_merge_named themes
automation: !include automations.yaml
script: !include scripts.yaml
scene: !include scenes.yaml
http:
use_x_forwarded_for: true
trusted_proxies:
- 127.0.0.1
- 100.64.0.0/10

@@ -0,0 +1,19 @@
{
"TCP": {
"443": {
"HTTPS": true
}
},
"Web": {
"${TS_CERT_DOMAIN}:443": {
"Handlers": {
"/": {
"Proxy": "http://127.0.0.1:8123"
}
}
}
},
"AllowFunnel": {
"${TS_CERT_DOMAIN}:443": false
}
}

@@ -1,77 +0,0 @@
#
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
name: immich
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/data
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- '2283:2283'
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
labels:
tsdproxy.enable: "true"
tsdproxy.name: "photos"
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:8-bookworm@sha256:a137a2b60aca1a75130022d6bb96af423fefae4eb55faf395732db3544803280
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:32324a2f41df5de9efe1af166b7008c3f55646f8d0e00d9550c16c9822366b4a
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
# DB_STORAGE_TYPE: 'HDD'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
shm_size: 128mb
restart: always
volumes:
model-cache:

@@ -1,20 +0,0 @@
services:
jellyfin:
restart: 'unless-stopped'
image: jellyfin/jellyfin
container_name: jellyfin
network_mode: 'host'
volumes:
- ./config:/config
- ./cache:/cache
- type: bind
source: /mnt/moviesHDD/JellyfinMedias
target: /media
devices:
- /dev/dri:/dev/dri
group_add:
- 44
- 922
labels:
tsdproxy.enable: "true"
tsdproxy.name: "stream"

@@ -1,14 +0,0 @@
services:
metube:
image: ghcr.io/alexta69/metube:latest
container_name: metube
restart: unless-stopped
ports:
- "8083:8081"
volumes:
- /home/criz/Medias/metube:/downloads
environment:
- MAX_CONCURRENT_DOWNLOADS=6
labels:
tsdproxy.enable: "true"
tsdproxy.name: "ytb-dl"

@@ -1,19 +0,0 @@
services:
n8n:
container_name: n8n
image: docker.n8n.io/n8nio/n8n
restart: always
ports:
- 5678:5678
environment:
# - N8N_PROTOCOL=http
- N8N_PORT=5678
- NODE_ENV=production
- WEBHOOK_URL=n8n.rufous-trench.ts.net
volumes:
- ./n8n:/home/node/.n8n
- ./local-files:/files
labels:
tsdproxy.enable: "true"
tsdproxy.name: "n8n"
tsdproxy.funnel: "true"

@@ -0,0 +1,31 @@
services:
navidrome-ts:
image: tailscale/tailscale
container_name: navidrome-ts
cap_add:
- net_admin
devices:
- /dev/net/tun:/dev/net/tun
volumes:
- ./tailscale/state:/var/lib/tailscale
- ./serveconfig:/config
environment:
- TS_AUTHKEY=${TS_AUTHKEY}
- TS_EXTRA_ARGS=--advertise-tags=tag:navidrome --reset
- TS_STATE_DIR=/var/lib/tailscale
- TS_SERVE_CONFIG=/config/serve-config.json
- TS_USERSPACE=false
hostname: music
restart: unless-stopped
navidrome:
network_mode: service:navidrome-ts
container_name: navidrome
image: deluan/navidrome:latest
user: 1000:1000 # should be owner of volumes
# ports:
# - "4533:4533"
restart: unless-stopped
volumes:
- "./data:/data"
- "/mnt/usb-ssd-01/music:/music:ro"

@@ -1,15 +0,0 @@
services:
navidrome:
container_name: navidrome
image: deluan/navidrome:latest
user: 1000:1000 # should be owner of volumes
ports:
- "4533:4533"
restart: unless-stopped
volumes:
- "./data:/data"
- "/home/criz/Medias/Music:/music:ro"
labels:
tsdproxy.enable: "true"
tsdproxy.name: "music"
tsdproxy.funnel: "true"

@@ -0,0 +1,19 @@
{
"TCP": {
"443": {
"HTTPS": true
}
},
"Web": {
"${TS_CERT_DOMAIN}:443": {
"Handlers": {
"/": {
"Proxy": "http://127.0.0.1:4533"
}
}
}
},
"AllowFunnel": {
"${TS_CERT_DOMAIN}:443": true
}
}

@@ -1,53 +1,60 @@
 services:
+  nextcloud-ts:
+    image: tailscale/tailscale
+    container_name: nextcloud-ts
+    cap_add:
+      - net_admin
+    volumes:
+      - ./tailscale/state:/var/lib/tailscale
+      - ./serveconfig:/config
+    devices:
+      - /dev/net/tun:/dev/net/tun
+    environment:
+      - TS_AUTHKEY=${TS_AUTHKEY}
+      - TS_EXTRA_ARGS=--advertise-tags=tag:nextcloud --reset
+      - TS_STATE_DIR=/var/lib/tailscale
+      - TS_SERVE_CONFIG=/config/serve-config.json
+      - TS_USERSPACE=false
+    hostname: cloud
+    restart: unless-stopped
   # Nextcloud
   nextcloud:
     image: nextcloud
     container_name: nextcloud
     restart: always
-    ports:
-      - "8081:80"
-    networks:
-      - cloud
+    network_mode: "service:nextcloud-ts"
     volumes:
-      - ./nextcloud_data:/var/www/html
-      - /home/criz/Medias:/mnt/medias
-      - /mnt/moviesHDD:/mnt/movieshdd
+      - ./nextcloud:/var/www/html
+      - /mnt/usb-ssd-01/nextcloud/nextcloud-data:/var/www/html/data
+      - /mnt/usb-ssd-01/metube:/mnt/metube
+      - /mnt/usb-ssd-01/music:/mnt/music
+      - /mnt/usb-ssd-01/nextcloud/ext1:/mnt/blender
     environment:
+      - PUID=1000
+      - PGID=1000
       - REDIS_HOST=redis
       - PHP_MEMORY_LIMIT=4G
      - PHP_UPLOAD_LIMIT=64G
       - PHP_OPCACHE_ENABLE=1
       - PHP_OPCACHE_MEMORY_CONSUMPTION=256
       - PHP_OPCACHE_MAX_ACCELERATED_FILES=100000
-      - TRUSTED_PROXIES=100.64.0.0/10
+      - TRUSTED_PROXIES=100.64.0.0/10 127.0.0.1
       - OVERWRITEPROTOCOL=https
       - OVERWRITEHOST=cloud.rufous-trench.ts.net
-    labels:
-      tsdproxy.enable: "true"
-      tsdproxy.name: "cloud"
-      tsdproxy.funnel: "true"
-      tsdproxy.ephemeral: "false"
   redis:
     image: redis:alpine
     container_name: redis
     restart: always
     volumes:
       - ./redis:/data
-    networks:
-      - cloud
   mariadb:
     image: mariadb:10.11
     container_name: nextcloud_db
     restart: always
     command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
-    networks:
-      - cloud
     volumes:
       - ./mariadb:/var/lib/mysql
     environment:
@@ -57,5 +64,4 @@ services:
       - MYSQL_PASSWORD=${MYSQL_PASSWORD}
-networks:
-  cloud:

@@ -0,0 +1,19 @@
{
"TCP": {
"443": {
"HTTPS": true
}
},
"Web": {
"${TS_CERT_DOMAIN}:443": {
"Handlers": {
"/": {
"Proxy": "http://127.0.0.1:80"
}
}
}
},
"AllowFunnel": {
"${TS_CERT_DOMAIN}:443": true
}
}

@@ -1,59 +0,0 @@
version: "3.9"
services:
backup-sync:
image: eeacms/rsync:latest
container_name: backup_sync
restart: unless-stopped
volumes:
# Logs
- ./logs:/var/log
# Medias folder
- type: bind
source: /home/criz/Medias
target: /source/Medias
read_only: true
- type: bind
source: /mnt/backupSD/Medias
target: /dest/Medias
# Immich folder
- type: bind
source: /home/criz/SelfHosting/Containers/Immich/library
target: /source/Immich
read_only: true
- type: bind
source: /mnt/backupSD/Immich
target: /dest/Immich
# Nextcloud
- type: bind
source: /home/criz/SelfHosting/Containers/NextCloud/nextcloud_data/data/Crizomb/files
target: /source/NextCloud
read_only: true
- type: bind
source: /mnt/backupSD/NextCloud
target: /dest/NextCloud
# Forgejo
- type: bind
source: /home/criz/SelfHosting/Containers/Forgejo/forgejo/git
target: /source/Forgejo/git
read_only: true
- type: bind
source: /home/criz/SelfHosting/Containers/Forgejo/forgejo/gitea
target: /source/Forgejo/gitea
read_only: true
- type: bind
source: /mnt/backupSD/Forgejo/git
target: /dest/Forgejo/git
- type: bind
source: /mnt/backupSD/Forgejo/gitea
target: /dest/Forgejo/gitea
environment:
- TZ=Europe/Madrid
- CRON_TASK_1=0 4 * * * rsync -av --delete /source/Medias/ /dest/Medias/
- CRON_TASK_2=0 4 * * * rsync -av --delete /source/Immich/ /dest/Immich/
- CRON_TASK_3=0 4 * * * rsync -av --delete /source/NextCloud/ /dest/NextCloud/
- CRON_TASK_4=0 4 * * * rsync -av --delete /source/Forgejo/git /dest/Forgejo/git/
- CRON_TASK_5=0 4 * * * rsync -av --delete /source/Forgejo/gitea /dest/Forgejo/gitea/
command: ["client"]

@@ -1,15 +0,0 @@
services:
## tsdproxy
tsdproxy:
image: almeidapaulopt/tsdproxy:latest
container_name: tsdproxy
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./data:/data
environment:
# Get AuthKey from your Tailscale account
- TSDPROXY_AUTHKEY=${TSD_PROXY_AUTH}
- DOCKER_HOST=unix:///var/run/docker.sock
restart: always

@@ -1,29 +0,0 @@
version: "3.8"
services:
uptime-kuma:
image: louislam/uptime-kuma:latest
container_name: uptime-kuma
restart: always
ports:
- "2301:3001" # This maps the container port "3001" to the host port "3001"
volumes:
- /path/to/data:/app/data # Configuring persistent storage
environment:
- TZ=UTC+1 # Set the timezone (change to your preferred local timezone so monitoring times are the same)
- UMASK=0022 # Set your file permissions manually
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3001"]
interval: 30s
retries: 3
start_period: 10s
timeout: 5s
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels:
tsdproxy.enable: "true"
tsdproxy.name: "uptime"

@@ -1,59 +1,90 @@
# Selfhosting personal repo
-(Old setup on ideapad, using TSDProxy)
 My very simple self-hosting setup.
-TSDProxy does the heavy lifting to make things simple.
+On my new mini-pc.
+Getting things simpler & simpler over time.
+Check the archive_01 branch for the old setup.
+Not 100% reproducible (maybe one day NixOS?); some light debugging & manual adjusting when migrating. See the random dump at the end.
 ## Hardware & OS
-- ideapad 15ADA05 laptop
-- Ryzen 5 3500U
-- 12 GB DDR4
-- 512 GB NVMe
+- Origimagic C2 Neo Mini PC
+- N95 Alder Lake
+- 12 GB DDR5
+- 512 GB internal NVMe
+- 1 TB external USB SSD (for Nextcloud files)
 ## Stack
 - Debian 13
-- Docker
+- Incus for VM management
 - Tailscale (VPN)
-- TSDProxy (Tailscale proxy to easily get a domain per service)
+- Docker (with compose)
+## Networking
+Heavy use of the Tailscale sidecar pattern (see `network_mode: "service:myapp-ts"`):
+- HTTPS without much effort
+- Easy domain names with MagicDNS (ending in rufous-trench.ts.net)
+- Both VPN access for best performance, and Funnel for public access without opening ports
+Drawbacks:
+- Vendor lock-in for networking. Even if Tailscale seems cooler than Cloudflare, it's still vendor lock-in
+- Not so widely adopted; often needs manual tinkering
 ## Services
 - Forgejo (git repo management)
 - Nextcloud (google drive replacement)
 - Navidrome (Music streaming)
-- Metube (Video / Music downloader)
-- n8n (automation with graphs, self-hosted Zapier)
-- Beszel (Resources monitoring)
-- Immich (google photos replacement)
-- Bentopdf (ilovepdf.com replacement)
-- AudiobookShelf (audiobooks & podcasts)
-- Affine (note taking, Obsidian replacement)
-- Gotify (Simple notification system)
-- PaperlessNGX (OCR things)
-- UptimeKuma (Healthchecks)
 ## CI
-Done with a Forgejo runner in a VM. See the VMs/ folder for more details.
+- Forgejo runners on my school computers for the moment, lol. Podman for easy rootless containers.
 ## Backup
-RSync backs up the important things (git, LFS, Nextcloud documents) to an SD card.
+#TODO rsync thingy to the family NAS
+Not ideal
-## Random dump
-Medias is mounted on nextcloud, and other things (just metube for the moment)
+# Random dump / doc
+### Nextcloud
+Group & user: 33; chown the nextcloud folders.
+The Music folder is owned by 1000 (for navidrome access),
+but I want nextcloud to also be able to move those files (easy access & add).
+Solution:
 ```bash
-sudo setfacl -R -m u:33:rwx /home/yourusername/Medias
-sudo setfacl -R -m d:u:33:rwx /home/yourusername/Medias
+sudo apt install acl
+sudo setfacl -R -m u:33:rwx /.../.../Music
 ```
-and metube doesn't like deleting videos externally
-Tailscale KEYs last 3 months. Do not forget to update it (inside the .env of TSDProxy)
-Nextcloud was a pain in the ass, do not use the cringe ass AIO container. Works fine now.
+Also the Nextcloud crontab thingy:
+```bash
+sudo crontab -e
+*/5 * * * * docker exec -u www-data nextcloud php /var/www/html/cron.php
+```
+### Forgejo
+Postgres complains when the networking changes: check pg_hba.conf and change it if needed.
+### Navidrome
+Because of the external mess (Nextcloud can modify files, etc.):
+```toml
+[scanner]
+PurgeMissing = "always"
+```
+## TODO / Random vaguely related projects
+- Home Assistant
+- Old laptop as a test server (probably on a new branch)
+- Wake-on-WAN for the desktop computer via a Raspberry Pi
+- Old phones used as dashboards
+- Maybe Grafana things one day
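A few quick checks for the sidecar pattern described in the README's Networking section — a sketch only, assuming the `forgejo-ts` container and the `git` hostname from the Forgejo compose file earlier in this diff:

```bash
# The sidecar owns the network namespace, so all checks go through it.
docker exec forgejo-ts tailscale status         # the node should show up as "git"
docker exec forgejo-ts tailscale serve status   # should map :443 -> http://127.0.0.1:3000
curl -I https://git.rufous-trench.ts.net        # Forgejo answers through the sidecar
```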
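For the Nextcloud crontab entry in the Random dump section, one way to confirm background jobs actually run via cron — a sketch using the standard `occ` tool shipped in the official image:

```bash
# Switch background jobs to cron mode, then check the instance state.
docker exec -u www-data nextcloud php /var/www/html/occ background:cron
docker exec -u www-data nextcloud php /var/www/html/occ status
```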
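And for the Forgejo note about pg_hba.conf: with the `./postgres` bind mount from the compose file above, the file is directly editable on the host. A sketch, assuming the repo layout implied by this diff:

```bash
# pg_hba.conf sits in the bind-mounted data directory of forgejo_db.
sudo grep -v '^#' Containers/Forgejo/postgres/pg_hba.conf   # show the active rules
# After editing, reload the configuration without restarting the container:
docker exec forgejo-postgres psql -U forgejo -c 'SELECT pg_reload_conf();'
```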

@@ -1 +0,0 @@
Not used yet

@@ -1,12 +0,0 @@
# Forgejo runner setup
Run a Forgejo runner instance inside a VM to do very dirty things, but securely.
The "dirty things" is just having the runner in a privileged container, with direct access to the host (VM) docker socket.
Runners can easily get full access to the host, but the host is a VM, so it's OK.
I use [incus](https://linuxcontainers.org/incus/) to create the VM. I love it: very simple, Docker-like interface.
See launch_forgejo_runner_vm.sh for more information about the VM setup.
To finish the setup, you still need to enter the VM and follow the Forgejo [runner installation guide](https://forgejo.org/docs/next/admin/actions/runner-installation/).
It's still a little bit manual, but it's done in 30s.

@@ -1,22 +0,0 @@
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# VERY INSECURE RUN THIS IN THE INCUS VM SEE LAUNCH SH FILE
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
services:
forgejo-runner:
image: data.forgejo.org/forgejo/runner:11
container_name: forgejo-runner
environment:
# Let the runner use the host Docker daemon via socket
DOCKER_HOST: unix:///var/run/docker.sock
# Run as root so the socket permissions don't block access
user: 0:0
# Mount host docker.sock for sibling container access
volumes:
- ./data:/data
- /var/run/docker.sock:/var/run/docker.sock
- ./volume/config.yml:/config.yml
restart: unless-stopped
privileged: true
#command: /bin/sh -c "sleep 5; forgejo-runner daemon"
command: '/bin/sh -c "while : ; do sleep 1 ; done ;"'

@@ -1,194 +0,0 @@
# Example configuration file, it's safe to copy this as the default config file without any modification.
# You don't have to copy this file to your instance,
# just run `forgejo-runner generate-config > config.yaml` to generate a config file.
#
# The value of level or job_level can be trace, debug, info, warn, error or fatal
#
log:
#
# What is displayed in the output of the runner process but not sent
# to the Forgejo instance.
#
level: info
#
# What is sent to the Forgejo instance and therefore
# visible in the web UI for a given job.
#
job_level: info
runner:
# Where to store the registration result.
file: .runner
# Execute how many tasks concurrently at the same time.
capacity: 1
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
# Please note that the Forgejo instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Forgejo instance if its timeout is shorter than this.
timeout: 3h
# The timeout for the runner to wait for running jobs to finish when
# shutting down because a TERM or INT signal has been received. Any
# running jobs that haven't finished after this timeout will be
# cancelled.
# If unset or zero the jobs will be cancelled immediately.
shutdown_timeout: 3h
# Whether to skip verifying the TLS certificate of the instance.
insecure: false
# The timeout for fetching the job from the Forgejo instance.
fetch_timeout: 5s
# The interval for fetching the job from the Forgejo instance.
fetch_interval: 2s
# The interval for reporting the job status and logs to the Forgejo instance.
report_interval: 1s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: ["macos-arm64:host", "ubuntu-latest:docker://node:20-bookworm", "ubuntu-22.04:docker://node:20-bookworm"]
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when executing the `daemon`, it will use labels in the `.runner` file.
labels: []
cache:
#
# When enabled, workflows will be given the ACTIONS_CACHE_URL environment variable
# used by the https://code.forgejo.org/actions/cache action. The server at this
# URL must implement a compliant REST API and it must also be reachable from
# the container or host running the workflows.
#
# See also https://forgejo.org/docs/next/user/actions/advanced-features/#cache
#
# When it is not enabled, none of the following options apply.
#
# It works as follows:
#
# - the workflow is given a one time use ACTIONS_CACHE_URL
# - a cache proxy listens to ACTIONS_CACHE_URL
# - the cache proxy securely communicates with the cache server using
# a shared secret
#
enabled: true
#
#######################################################################
#
# Only used for the internal cache server.
#
# If external_server is not set, the Forgejo runner will spawn a
# cache server that will be used by the cache proxy.
#
#######################################################################
#
# The port bound by the internal cache server.
# 0 means to use a random available port.
#
port: 0
#
# The directory to store the cache data.
#
# If empty, the cache data will be stored in $HOME/.cache/actcache.
#
dir: ""
#
#######################################################################
#
# Only used for the external cache server.
#
# If external_server is set, the internal cache server is not
# spawned.
#
#######################################################################
#
# The URL of the cache server. The URL should generally end with
# "/". The cache proxy will forward requests to the external
# server. The requests are authenticated with the "secret" that is
# shared with the external server.
#
external_server: ""
#
# The shared cache secret used to secure the communications between
# the cache proxy and the cache server.
#
# If empty, it will be generated to a new secret automatically when
# the server starts and it will stay the same until it restarts.
#
secret: ""
#
#######################################################################
#
# Common to the internal and external cache server
#
#######################################################################
#
# The IP or hostname (195.84.20.30 or example.com) to use when constructing
# ACTIONS_CACHE_URL which is the URL of the cache proxy.
#
# If empty it will be detected automatically.
#
# If the containers or host running the workflows reside on a
# different network than the Forgejo runner (for instance when the
# docker server used to create containers is not running on the same
# host as the Forgejo runner), it may be impossible to figure that
# out automatically. In that case you can specify which IP or
# hostname to use to reach the internal cache server created by the
# Forgejo runner.
#
host: ""
#
# The port bound by the internal cache proxy.
# 0 means to use a random available port.
#
proxy_port: 0
#
# Overrides the ACTIONS_CACHE_URL variable passed to workflow
# containers. The URL should generally not end with "/". This should only
# be used if the runner host is not reachable from the workflow containers,
# and requires further setup.
#
actions_cache_url_override: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, create a network automatically.
network: ""
# Whether to create networks with IPv6 enabled. Requires the Docker daemon to be set up accordingly.
# Only takes effect if "network" is set to "".
enable_ipv6: false
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --volume /etc/ssl/certs:/etc/ssl/certs:ro).
options:
# The parent directory of a job's working directory.
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /etc/ssl/certs
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# Overrides the docker host set by the DOCKER_HOST environment variable, and mounts on the job container.
# If "-" or "", no docker host will be mounted in the job container
# If "automount", an available docker host will automatically be found and mounted in the job container (e.g. /var/run/docker.sock).
# If it's a url, the specified docker host will be mounted in the job container
# Example urls: unix:///run/docker.socket or ssh://user@host
# The specified socket is mounted within the job container at /var/run/docker.sock
docker_host: "-"
# Pull docker image(s) even if already present
force_pull: true
# Rebuild local docker image(s) even if already present
force_rebuild: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent:

@@ -1,9 +0,0 @@
#!/usr/bin/env bash
set -e
mkdir -p data/.cache
chown -R 1001:1001 data
chmod 775 data/.cache
chmod g+s data/.cache

@@ -1,106 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
########################################
# Configuration
########################################
VM_NAME="forgejo-runner-vm"
IMAGE="images:debian/trixie"
MEMORY="2GiB"
DISK="25GiB"
CPUS="4"
########################################
# Helper functions
########################################
log() {
echo "[+] $*"
}
vm_exists() {
incus info "$VM_NAME" >/dev/null 2>&1
}
########################################
# Create VM if needed
########################################
if vm_exists; then
log "VM '$VM_NAME' already exists, skipping creation"
else
log "Creating VM '$VM_NAME'"
incus launch "$IMAGE" "$VM_NAME" \
--vm \
-c limits.memory="$MEMORY" \
-c limits.cpu="$CPUS" \
--device root,size="$DISK"
fi
########################################
# Wait for VM to be ready
########################################
log "Waiting for VM to become ready"
incus exec "$VM_NAME" -- cloud-init status --wait >/dev/null 2>&1 || true
log "Waiting for Incus Agent to start"
# This loop tries a simple command until it succeeds or times out
RETRIES=0
MAX_RETRIES=30
until incus exec "$VM_NAME" -- uptime >/dev/null 2>&1; do
RETRIES=$((RETRIES + 1))
if [ $RETRIES -ge $MAX_RETRIES ]; then
echo "Error: Timeout waiting for VM agent to start."
exit 1
fi
echo "$RETRIES retries"
sleep 1
done
log "Agent is responsive. Proceeding..."
########################################
# Install Docker inside the VM
########################################
log "Installing Docker inside VM"
incus exec "$VM_NAME" -- bash -eux <<'EOF'
export DEBIAN_FRONTEND=noninteractive
# Base packages
apt-get update
apt-get install -y \
ca-certificates \
curl \
gnupg
# Docker GPG key
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/debian/gpg \
-o /etc/apt/keyrings/docker.asc
chmod a+r /etc/apt/keyrings/docker.asc
# Docker repository
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] \
https://download.docker.com/linux/debian \
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" \
> /etc/apt/sources.list.d/docker.list
# Install Docker
apt-get update
apt-get install -y \
docker-ce \
docker-ce-cli \
containerd.io \
docker-buildx-plugin \
docker-compose-plugin
# Enable Docker
systemctl enable docker
systemctl start docker
EOF
########################################
# Done
########################################
log "Docker successfully installed in VM '$VM_NAME'"
log "You can access it with:"
echo " incus exec $VM_NAME -- bash"

@@ -1,3 +1,3 @@
 for d in Containers/*/ ; do
-  docker compose -f "$d/compose.yml" up -d
+  docker compose -f "$d/compose.yaml" up -d
 done

@@ -1,4 +1,3 @@
 for d in Containers/*/ ; do
-  docker compose -f "$d/compose.yml" down
+  docker compose -f "$d/compose.yaml" down
 done

stop_very_all.sh (new executable file)

@@ -0,0 +1 @@
docker stop $(docker ps -a -q)
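A note on this one-liner: `docker ps -a -q` lists every container on the host (including stopped ones), so this stops everything regardless of which compose file started it; `docker stop` on an already-stopped container is a harmless no-op.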