From e33e131de7d974f051bee337660d9fa6c0d251cc Mon Sep 17 00:00:00 2001
From: criz
Date: Thu, 25 Dec 2025 00:29:30 +0100
Subject: [PATCH] ForgejoRunner

---
 .gitignore                                    |   2 +
 Containers/ArchiveBox/compose.yml             |  35 ++++
 Containers/UptimeKuma/compose.yml             |  29 +++
 VMs/ForgejoRunner/README.md                   |   7 +
 VMs/ForgejoRunner/compose.yml                 |  22 ++
 VMs/ForgejoRunner/config.yml                  | 194 ++++++++++++++++++
 VMs/ForgejoRunner/create_data.sh              |   9 +
 VMs/ForgejoRunner/launch_forgejo_runner_vm.sh | 104 ++++++++++
 8 files changed, 402 insertions(+)
 create mode 100644 Containers/ArchiveBox/compose.yml
 create mode 100644 Containers/UptimeKuma/compose.yml
 create mode 100644 VMs/ForgejoRunner/README.md
 create mode 100644 VMs/ForgejoRunner/compose.yml
 create mode 100644 VMs/ForgejoRunner/config.yml
 create mode 100644 VMs/ForgejoRunner/create_data.sh
 create mode 100755 VMs/ForgejoRunner/launch_forgejo_runner_vm.sh

diff --git a/.gitignore b/.gitignore
index db4d37a..f7d0ad5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,6 +17,8 @@
 # Scripts
 !/Scripts/*
 
+# VMs
+!/VMs/*
 
 # Keep utility files in root
 !.gitignore
diff --git a/Containers/ArchiveBox/compose.yml b/Containers/ArchiveBox/compose.yml
new file mode 100644
index 0000000..115fc79
--- /dev/null
+++ b/Containers/ArchiveBox/compose.yml
@@ -0,0 +1,35 @@
+# Usage:
+#   mkdir -p ~/archivebox/data && cd ~/archivebox
+#   curl -fsSL 'https://docker-compose.archivebox.io' > docker-compose.yml
+#   docker compose run archivebox version
+#   docker compose run archivebox config --set SAVE_ARCHIVE_DOT_ORG=False
+#   docker compose run archivebox add --depth=1 'https://news.ycombinator.com'
+#   docker compose run -T archivebox add < bookmarks.txt
+#   docker compose up -d && open 'http://localhost:8647'
+#   docker compose run archivebox help
+# Documentation:
+#   https://github.com/ArchiveBox/ArchiveBox/wiki/Docker#docker-compose

+services:
+  archivebox:
+    image: archivebox/archivebox:latest
+    ports:
+      - 8647:8000
+    volumes:
+      - ./data:/data
+      # ./data/personas/Default/chrome_profile/Default:/data/personas/Default/chrome_profile/Default
+    environment:
+      - ALLOWED_HOSTS=*        # set this to the hostname(s) you're going to serve the site from!
+      - CSRF_TRUSTED_ORIGINS=https://archive.rufous-trench.ts.net  # you MUST set this to the server's URL for admin login and the REST API to work
+      - PUBLIC_INDEX=True      # set to False to prevent anonymous users from viewing the snapshot list
+      - PUBLIC_SNAPSHOTS=True  # set to False to prevent anonymous users from viewing snapshot content
+      - PUBLIC_ADD_VIEW=False  # set to True to allow anonymous users to submit new URLs to archive
+      # ...
+      # For more info, see: https://github.com/ArchiveBox/ArchiveBox/wiki/Docker#configuration
+    labels:
+      tsdproxy.enable: "true"
+      tsdproxy.name: "archive"
+
+
+
+
diff --git a/Containers/UptimeKuma/compose.yml b/Containers/UptimeKuma/compose.yml
new file mode 100644
index 0000000..9cf87fe
--- /dev/null
+++ b/Containers/UptimeKuma/compose.yml
@@ -0,0 +1,29 @@
+version: "3.8"
+
+services:
+  uptime-kuma:
+    image: louislam/uptime-kuma:latest
+    container_name: uptime-kuma
+    restart: always
+    ports:
+      - "2301:3001"  # This maps container port 3001 to host port 2301
+    volumes:
+      - /path/to/data:/app/data  # Configure persistent storage
+    environment:
+      - TZ=Europe/Paris  # Set the timezone using an IANA name (change to your local timezone so monitoring times match)
+      - UMASK=0022       # Set your file permissions manually
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:3001"]
+      interval: 30s
+      retries: 3
+      start_period: 10s
+      timeout: 5s
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    labels:
+      tsdproxy.enable: "true"
+      tsdproxy.name: "uptime"
+
diff --git a/VMs/ForgejoRunner/README.md b/VMs/ForgejoRunner/README.md
new file mode 100644
index 0000000..bf451cb
--- /dev/null
+++ b/VMs/ForgejoRunner/README.md
@@ -0,0 +1,7 @@
+# Forgejo runner setup
+Run a Forgejo runner instance inside a VM to do very dirty things, but securely.
+The runner runs as a Docker container inside the VM and has access to the Docker socket, so it can
+spawn sibling containers for CI/CD jobs.
+
+To finish the setup, you still need to enter the VM and follow the Forgejo [runner installation guide](https://forgejo.org/docs/next/admin/actions/runner-installation/),
+copy-pasting quite a few things by hand (see the sketch at the end of this patch).
diff --git a/VMs/ForgejoRunner/compose.yml b/VMs/ForgejoRunner/compose.yml
new file mode 100644
index 0000000..fb55e78
--- /dev/null
+++ b/VMs/ForgejoRunner/compose.yml
@@ -0,0 +1,22 @@
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# VERY INSECURE: RUN THIS ONLY INSIDE THE INCUS VM (see launch_forgejo_runner_vm.sh)
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+services:
+  forgejo-runner:
+    image: data.forgejo.org/forgejo/runner:11
+    container_name: forgejo-runner
+    environment:
+      # Let the runner use the host Docker daemon via the mounted socket
+      DOCKER_HOST: unix:///var/run/docker.sock
+    # Run as root so the socket permissions don't block access
+    user: "0:0"
+    # Mount the host docker.sock so jobs run as sibling containers
+    volumes:
+      - ./data:/data
+      - /var/run/docker.sock:/var/run/docker.sock
+      - ./volume/config.yml:/config.yml
+    restart: unless-stopped
+    privileged: true
+    #command: /bin/sh -c "sleep 5; forgejo-runner daemon"  # enable this once the runner is registered
+    command: '/bin/sh -c "while : ; do sleep 1 ; done ;"'  # keep-alive so you can exec in and register
diff --git a/VMs/ForgejoRunner/config.yml b/VMs/ForgejoRunner/config.yml
new file mode 100644
index 0000000..bc3ca30
--- /dev/null
+++ b/VMs/ForgejoRunner/config.yml
@@ -0,0 +1,194 @@
+# Example configuration file; it's safe to copy this as the default config file without any modification.
+
+# You don't have to copy this file to your instance;
+# just run `forgejo-runner generate-config > config.yaml` to generate a config file.
+
+#
+# The value of level or job_level can be trace, debug, info, warn, error or fatal.
+#
+log:
+  #
+  # What is displayed in the output of the runner process but not sent
+  # to the Forgejo instance.
+  #
+  level: info
+  #
+  # What is sent to the Forgejo instance and therefore
+  # visible in the web UI for a given job.
+  #
+  job_level: info
+
+runner:
+  # Where to store the registration result.
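+  # A minimal registration sketch (run inside the runner container; the
+  # instance URL, token, runner name and labels below are placeholders,
+  # not values from this repo):
+  #   forgejo-runner register --no-interactive \
+  #     --instance https://forgejo.example.com \
+  #     --token <registration-token> \
+  #     --name vm-runner \
+  #     --labels docker:docker://node:20-bookworm
+  # A successful registration writes the result to the file below.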
+  file: .runner
+  # How many tasks to execute concurrently.
+  capacity: 1
+  # Extra environment variables to run jobs.
+  envs:
+    A_TEST_ENV_NAME_1: a_test_env_value_1
+    A_TEST_ENV_NAME_2: a_test_env_value_2
+  # Extra environment variables to run jobs from a file.
+  # It will be ignored if it's empty or the file doesn't exist.
+  env_file: .env
+  # The timeout for a job to be finished.
+  # Please note that the Forgejo instance also has a timeout (3h by default) for the job.
+  # So the job could be stopped by the Forgejo instance if its timeout is shorter than this.
+  timeout: 3h
+  # The timeout for the runner to wait for running jobs to finish when
+  # shutting down because a TERM or INT signal has been received. Any
+  # running jobs that haven't finished after this timeout will be
+  # cancelled.
+  # If unset or zero the jobs will be cancelled immediately.
+  shutdown_timeout: 3h
+  # Whether to skip verifying the TLS certificate of the instance.
+  insecure: false
+  # The timeout for fetching the job from the Forgejo instance.
+  fetch_timeout: 5s
+  # The interval for fetching the job from the Forgejo instance.
+  fetch_interval: 2s
+  # The interval for reporting the job status and logs to the Forgejo instance.
+  report_interval: 1s
+  # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
+  # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:20-bookworm", "ubuntu-22.04:docker://node:20-bookworm"]
+  # If it's empty when registering, it will ask for inputting labels.
+  # If it's empty when executing the `daemon`, it will use labels in the `.runner` file.
+  labels: []
+
+cache:
+  #
+  # When enabled, workflows will be given the ACTIONS_CACHE_URL environment variable
+  # used by the https://code.forgejo.org/actions/cache action. The server at this
+  # URL must implement a compliant REST API and it must also be reachable from
+  # the container or host running the workflows.
+  #
+  # See also https://forgejo.org/docs/next/user/actions/advanced-features/#cache
+  #
+  # When it is not enabled, none of the following options apply.
+  #
+  # It works as follows:
+  #
+  # - the workflow is given a one-time-use ACTIONS_CACHE_URL
+  # - a cache proxy listens to ACTIONS_CACHE_URL
+  # - the cache proxy securely communicates with the cache server using
+  #   a shared secret
+  #
+  enabled: true
+  #
+  #######################################################################
+  #
+  # Only used for the internal cache server.
+  #
+  # If external_server is not set, the Forgejo runner will spawn a
+  # cache server that will be used by the cache proxy.
+  #
+  #######################################################################
+  #
+  # The port bound by the internal cache server.
+  # 0 means to use a random available port.
+  #
+  port: 0
+  #
+  # The directory to store the cache data.
+  #
+  # If empty, the cache data will be stored in $HOME/.cache/actcache.
+  #
+  dir: ""
+  #
+  #######################################################################
+  #
+  # Only used for the external cache server.
+  #
+  # If external_server is set, the internal cache server is not
+  # spawned.
+  #
+  #######################################################################
+  #
+  # The URL of the cache server. The URL should generally end with
+  # "/". The cache proxy will forward requests to the external
+  # server. The requests are authenticated with the "secret" that is
+  # shared with the external server.
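+  # A hypothetical example (the URL and secret are made-up values for
+  # illustration, not defaults):
+  #   external_server: "http://cache.internal:4000/"
+  #   secret: "replace-with-a-long-random-shared-secret"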
+  #
+  external_server: ""
+  #
+  # The shared cache secret used to secure the communications between
+  # the cache proxy and the cache server.
+  #
+  # If empty, a new secret will be generated automatically when the
+  # server starts and it will stay the same until it restarts.
+  #
+  secret: ""
+  #
+  #######################################################################
+  #
+  # Common to the internal and external cache server
+  #
+  #######################################################################
+  #
+  # The IP or hostname (195.84.20.30 or example.com) to use when constructing
+  # ACTIONS_CACHE_URL, which is the URL of the cache proxy.
+  #
+  # If empty it will be detected automatically.
+  #
+  # If the containers or host running the workflows reside on a
+  # different network than the Forgejo runner (for instance when the
+  # docker server used to create containers is not running on the same
+  # host as the Forgejo runner), it may be impossible to figure that
+  # out automatically. In that case you can specify which IP or
+  # hostname to use to reach the internal cache server created by the
+  # Forgejo runner.
+  #
+  host: ""
+  #
+  # The port bound by the internal cache proxy.
+  # 0 means to use a random available port.
+  #
+  proxy_port: 0
+  #
+  # Overrides the ACTIONS_CACHE_URL variable passed to workflow
+  # containers. The URL should generally not end with "/". This should only
+  # be used if the runner host is not reachable from the workflow containers,
+  # and requires further setup.
+  #
+  actions_cache_url_override: ""
+
+container:
+  # Specifies the network to which the container will connect.
+  # Could be host, bridge or the name of a custom network.
+  # If it's empty, a network will be created automatically.
+  network: ""
+  # Whether to create networks with IPv6 enabled. Requires the Docker daemon to be set up accordingly.
+  # Only takes effect if "network" is set to "".
+  enable_ipv6: false
+  # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
+  privileged: false
+  # Other options to be used when the container is started (e.g., --volume /etc/ssl/certs:/etc/ssl/certs:ro).
+  options:
+  # The parent directory of a job's working directory.
+  # If it's empty, /workspace will be used.
+  workdir_parent:
+  # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported; see https://github.com/gobwas/glob
+  # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
+  # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
+  #   valid_volumes:
+  #     - data
+  #     - /src/*.json
+  # If you want to allow any volume, please use the following configuration:
+  #   valid_volumes:
+  #     - '**'
+  valid_volumes: []
+  # Overrides the docker host set by the DOCKER_HOST environment variable, and mounts it in the job container.
+  # If "-" or "", no docker host will be mounted in the job container.
+  # If "automount", an available docker host will automatically be found and mounted in the job container (e.g. /var/run/docker.sock).
+  # If it's a URL, the specified docker host will be mounted in the job container.
+  # Example URLs: unix:///run/docker.sock or ssh://user@host
+  # The specified socket is mounted within the job container at /var/run/docker.sock.
+  docker_host: "-"
+  # Pull docker image(s) even if already present
+  force_pull: true
+  # Rebuild local docker image(s) even if already present
+  force_rebuild: false
+
+host:
+  # The parent directory of a job's working directory.
+  # If it's empty, $HOME/.cache/act/ will be used.
+  workdir_parent:
diff --git a/VMs/ForgejoRunner/create_data.sh b/VMs/ForgejoRunner/create_data.sh
new file mode 100644
index 0000000..2a02ff9
--- /dev/null
+++ b/VMs/ForgejoRunner/create_data.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+set -e
+
+mkdir -p data/.cache
+
+chown -R 1001:1001 data
+chmod 775 data/.cache
+chmod g+s data/.cache
diff --git a/VMs/ForgejoRunner/launch_forgejo_runner_vm.sh b/VMs/ForgejoRunner/launch_forgejo_runner_vm.sh
new file mode 100755
index 0000000..99a03fa
--- /dev/null
+++ b/VMs/ForgejoRunner/launch_forgejo_runner_vm.sh
@@ -0,0 +1,104 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+########################################
+# Configuration
+########################################
+VM_NAME="forgejo-runner-vm"
+IMAGE="images:debian/trixie"
+MEMORY="2GiB"
+CPUS="4"
+
+########################################
+# Helper functions
+########################################
+log() {
+    echo "[+] $*"
+}
+
+vm_exists() {
+    incus info "$VM_NAME" >/dev/null 2>&1
+}
+
+########################################
+# Create VM if needed
+########################################
+if vm_exists; then
+    log "VM '$VM_NAME' already exists, skipping creation"
+else
+    log "Creating VM '$VM_NAME'"
+    incus launch "$IMAGE" "$VM_NAME" \
+        --vm \
+        -c limits.memory="$MEMORY" \
+        -c limits.cpu="$CPUS"
+fi
+
+########################################
+# Wait for VM to be ready
+########################################
+log "Waiting for VM to become ready"
+incus exec "$VM_NAME" -- cloud-init status --wait >/dev/null 2>&1 || true
+
+log "Waiting for Incus agent to start"
+# This loop tries a simple command until it succeeds or times out
+RETRIES=0
+MAX_RETRIES=30
+until incus exec "$VM_NAME" -- uptime >/dev/null 2>&1; do
+    RETRIES=$((RETRIES + 1))
+    if [ $RETRIES -ge $MAX_RETRIES ]; then
+        echo "Error: Timeout waiting for VM agent to start."
+        exit 1
+    fi
+    echo "$RETRIES retries"
+    sleep 1
+done
+
+log "Agent is responsive. Proceeding..."
+########################################
+# Install Docker inside the VM
+########################################
+log "Installing Docker inside VM"
+
+incus exec "$VM_NAME" -- bash -eux <<'EOF'
+export DEBIAN_FRONTEND=noninteractive
+
+# Base packages
+apt-get update
+apt-get install -y \
+    ca-certificates \
+    curl \
+    gnupg
+
+# Docker GPG key
+install -m 0755 -d /etc/apt/keyrings
+curl -fsSL https://download.docker.com/linux/debian/gpg \
+    -o /etc/apt/keyrings/docker.asc
+chmod a+r /etc/apt/keyrings/docker.asc
+
+# Docker repository
+echo \
+    "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] \
+    https://download.docker.com/linux/debian \
+    $(. /etc/os-release && echo "$VERSION_CODENAME") stable" \
+    > /etc/apt/sources.list.d/docker.list
+
+# Install Docker
+apt-get update
+apt-get install -y \
+    docker-ce \
+    docker-ce-cli \
+    containerd.io \
+    docker-buildx-plugin \
+    docker-compose-plugin
+
+# Enable Docker
+systemctl enable docker
+systemctl start docker
+EOF
+
+########################################
+# Done
+########################################
+log "Docker successfully installed in VM '$VM_NAME'"
+log "You can access it with:"
+echo "  incus exec $VM_NAME -- bash"
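
After the VM is up, the runner still has to be wired up by hand. A minimal sketch of the remaining steps, assuming the files in VMs/ForgejoRunner are pushed into the VM as-is; the path /root/ForgejoRunner, the instance URL, token, runner name and labels are placeholders, not values from this patch:

```sh
# Copy the compose file, config and data script into the VM
incus file push -r VMs/ForgejoRunner forgejo-runner-vm/root/

incus exec forgejo-runner-vm -- bash -c '
  cd /root/ForgejoRunner
  bash create_data.sh                       # create ./data with the expected ownership
  mkdir -p volume && cp config.yml volume/  # compose mounts ./volume/config.yml
  docker compose up -d                      # starts the keep-alive placeholder container
  # Register against your Forgejo instance (placeholder URL/token):
  docker exec forgejo-runner forgejo-runner register --no-interactive \
    --instance https://forgejo.example.com \
    --token <registration-token> \
    --name vm-runner \
    --labels docker:docker://node:20-bookworm
'

# Then switch the compose command to "forgejo-runner daemon" and recreate:
incus exec forgejo-runner-vm -- bash -c 'cd /root/ForgejoRunner && docker compose up -d --force-recreate'
```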