first commit

CakesTwix committed on 2024-11-30 18:57:35 +02:00
commit f6fc781514
14 changed files with 612 additions and 0 deletions


@@ -0,0 +1,21 @@
networks:
  forgejo:
    external: false

services:
  server:
    image: codeberg.org/forgejo/forgejo:9
    container_name: forgejo
    environment:
      - USER_UID=1000
      - USER_GID=1000
    restart: always
    networks:
      - forgejo
    volumes:
      - ./data:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - '3000:3000'
      - '222:22'
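Note: with the 222:22 mapping above, SSH clone URLs must use port 222 (ssh://git@<host>:222/<owner>/<repo>.git). If the web UI should advertise that port and an external URL, Gitea-style env-to-ini overrides are one option; a sketch only, assuming the GITEA__section__KEY convention carried over by Forgejo, with git.example.com as a purely hypothetical hostname:

    environment:
      - USER_UID=1000
      - USER_GID=1000
      # assumed env-to-ini overrides; hostname and port shown are illustrative
      - GITEA__server__SSH_PORT=222
      - GITEA__server__ROOT_URL=https://git.example.com/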


@@ -0,0 +1,21 @@
services:
  homeassistant:
    image: "ghcr.io/home-assistant/home-assistant:stable"
    container_name: homeassistant
    privileged: true
    network_mode: host
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Kyiv
    cap_add:
      - NET_ADMIN
      - NET_RAW
    volumes:
      - /var/run/dbus:/var/run/dbus:z
      - .config:/config:z
    ports:
      - 8123:8123 #optional
    #devices:
    #  - /path/to/device:/path/to/device #optional
    restart: unless-stopped
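Note: because network_mode: host is set, Docker ignores the ports: mapping above and the UI is reached directly on host port 8123. The commented devices: block is where a USB radio would be passed through; a sketch with a hypothetical device path:

    devices:
      - /dev/ttyUSB0:/dev/ttyUSB0 # hypothetical Zigbee/Z-Wave stick; adjust to the real path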

immich/.env (new file, 21 lines)

@@ -0,0 +1,21 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# The location where your uploaded files are stored
UPLOAD_LOCATION=/media/HDD4/Photos
# The location where your database files are stored
DB_DATA_LOCATION=./postgres
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
# TZ=Etc/UTC
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release
# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
DB_PASSWORD=fucknopassforyou
# The values below this line do not need to be changed
###################################################################################
DB_USERNAME=postgres
DB_DATABASE_NAME=immich
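As the comments above suggest, the values most worth changing are the timezone and the database password (alphanumeric only). A sketch with placeholder values:

# placeholder values; generate the password yourself, e.g. with `openssl rand -hex 24`
TZ=Europe/Kyiv
DB_PASSWORD=REPLACE_WITH_RANDOM_ALPHANUMERIC
# optional version pin (example value taken from the comment above)
# IMMICH_VERSION=v1.71.0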

immich/docker-compose.yml (new file, 76 lines)

@@ -0,0 +1,76 @@
#
# WARNING: Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
#

name: immich

services:
  immich-server:
    container_name: immich_server
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    extends:
      file: hwaccel.transcoding.yml
      service: vaapi # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
    volumes:
      # Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
      - ${UPLOAD_LOCATION}:/usr/src/app/upload
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    ports:
      - 2283:2283
    depends_on:
      - redis
      - database
    restart: always
    healthcheck:
      disable: false

  immich-machine-learning:
    container_name: immich_machine_learning
    # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-openvino
    # extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
    #   file: hwaccel.ml.yml
    #   service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
    volumes:
      - model-cache:/cache
    env_file:
      - .env
    restart: always
    healthcheck:
      disable: false

  redis:
    container_name: immich_redis
    image: docker.io/redis:6.2-alpine@sha256:2d1463258f2764328496376f5d965f20c6a67f66ea2b06dc42af351f75248792
    healthcheck:
      test: redis-cli ping || exit 1
    restart: always

  database:
    container_name: immich_postgres
    image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
    environment:
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
      POSTGRES_INITDB_ARGS: '--data-checksums'
    volumes:
      # Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
      - ${DB_DATA_LOCATION}:/var/lib/postgresql/data
    healthcheck:
      test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
      interval: 5m
      start_interval: 30s
      start_period: 5m
    command: ["postgres", "-c", "shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
    restart: always

volumes:
  model-cache:
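Note: the machine-learning image above already uses the -openvino tag, but the extends block that would give the container access to /dev/dri is still commented out. If OpenVINO inference is intended, the file's own comments describe uncommenting it roughly like this (a sketch, not applied here; hwaccel.ml.yml appears later in this commit):

  immich-machine-learning:
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-openvino
    extends:
      file: hwaccel.ml.yml
      service: openvino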

immich/hwaccel.ml.yml (new file, 43 lines)

@@ -0,0 +1,43 @@
# Configurations for hardware-accelerated machine learning

# If using Unraid or another platform that doesn't allow multiple Compose files,
# you can inline the config for a backend by copying its contents
# into the immich-machine-learning service in the docker-compose.yml file.

# See https://immich.app/docs/features/ml-hardware-acceleration for info on usage.

services:
  armnn:
    devices:
      - /dev/mali0:/dev/mali0
    volumes:
      - /lib/firmware/mali_csffw.bin:/lib/firmware/mali_csffw.bin:ro # Mali firmware for your chipset (not always required depending on the driver)
      - /usr/lib/libmali.so:/usr/lib/libmali.so:ro # Mali driver for your chipset (always required)

  cpu: {}

  cuda:
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities:
                - gpu

  openvino:
    device_cgroup_rules:
      - 'c 189:* rmw'
    devices:
      - /dev/dri:/dev/dri
    volumes:
      - /dev/bus/usb:/dev/bus/usb

  openvino-wsl:
    devices:
      - /dev/dri:/dev/dri
      - /dev/dxg:/dev/dxg
    volumes:
      - /dev/bus/usb:/dev/bus/usb
      - /usr/lib/wsl:/usr/lib/wsl

immich/hwaccel.transcoding.yml (new file, 55 lines)

@@ -0,0 +1,55 @@
# Configurations for hardware-accelerated transcoding

# If using Unraid or another platform that doesn't allow multiple Compose files,
# you can inline the config for a backend by copying its contents
# into the immich-microservices service in the docker-compose.yml file.

# See https://immich.app/docs/features/hardware-transcoding for more info on using hardware transcoding.

services:
  cpu: {}

  nvenc:
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities:
                - gpu
                - compute
                - video

  quicksync:
    devices:
      - /dev/dri:/dev/dri

  rkmpp:
    security_opt: # enables full access to /sys and /proc, still far better than privileged: true
      - systempaths=unconfined
      - apparmor=unconfined
    group_add:
      - video
    devices:
      - /dev/rga:/dev/rga
      - /dev/dri:/dev/dri
      - /dev/dma_heap:/dev/dma_heap
      - /dev/mpp_service:/dev/mpp_service
      #- /dev/mali0:/dev/mali0 # only required to enable OpenCL-accelerated HDR -> SDR tonemapping
    volumes:
      #- /etc/OpenCL:/etc/OpenCL:ro # only required to enable OpenCL-accelerated HDR -> SDR tonemapping
      #- /usr/lib/aarch64-linux-gnu/libmali.so.1:/usr/lib/aarch64-linux-gnu/libmali.so.1:ro # only required to enable OpenCL-accelerated HDR -> SDR tonemapping

  vaapi:
    devices:
      - /dev/dri:/dev/dri

  vaapi-wsl: # use this for VAAPI if you're running Immich in WSL2
    devices:
      - /dev/dri:/dev/dri
    volumes:
      - /usr/lib/wsl:/usr/lib/wsl
    environment:
      - LD_LIBRARY_PATH=/usr/lib/wsl/lib
      - LIBVA_DRIVER_NAME=d3d12
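Because immich-server in immich/docker-compose.yml extends the vaapi service above, the merged configuration (viewable with docker compose config) effectively adds the DRI device to the server container, roughly:

  immich-server:
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    devices:
      - /dev/dri:/dev/dri
    # ...all other immich-server settings from docker-compose.yml unchanged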


@@ -0,0 +1,16 @@
services:
  jackett:
    image: lscr.io/linuxserver/jackett:latest
    container_name: jackett
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Kyiv
      - AUTO_UPDATE=true #optional
      - RUN_OPTS= #optional
    volumes:
      - ./config:/config
      - ./blackhole:/downloads
    ports:
      - 9117:9117
    restart: unless-stopped

jellyfin-stack/.env (new file, 6 lines)

@@ -0,0 +1,6 @@
BASE_PATH=/home/cakestwix/docker/jellyfin
JELLYFIN_URL=
PUID=1000
PGID=1000
MEDIA_SHARE=/media/Jellyfin
TZ=Europe/Kyiv
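JELLYFIN_URL is left empty above; the compose file in this stack passes it to Jellyfin as JELLYFIN_PublishedServerUrl, i.e. the address advertised to clients. A sketch with a purely hypothetical value:

JELLYFIN_URL=https://jellyfin.example.com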

jellyfin-stack/docker-compose.yml (new file, 115 lines)

@@ -0,0 +1,115 @@
version: "3.0"

networks:
  proxy:
    driver: bridge

services:
  # REVERSE PROXY IS IN OTHER DOCKER COMPOSE STACK

  #Jellyfin - used to display the media
  #This can also be replaced by Emby/Plex
  jellyfin:
    image: ghcr.io/linuxserver/jellyfin:latest
    container_name: jellyfin
    environment:
      - JELLYFIN_PublishedServerUrl=${JELLYFIN_URL}
      - PUID=${PUID}
      - PGID=${PGID}
    ports:
      - 8096:8096
      - 8920:8920
    devices:
      - /dev/dri:/dev/dri #Required for jellyfin HW transcoding / QuickSync
    volumes:
      - ${BASE_PATH}/jellyfin/config:/config:z
      - /media/Jellyfin:/media:z
    restart: unless-stopped
    networks:
      - proxy

  #Radarr - used to find movies automatically
  radarr:
    image: lscr.io/linuxserver/radarr:latest
    container_name: radarr
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
    volumes:
      - ${BASE_PATH}/radarr/config:/config:z
      - ${MEDIA_SHARE}:/data:z #Access to the entire /media
    ports:
      - 7878:7878
    networks:
      - proxy
    restart: unless-stopped

  #Sonarr - used to find shows automatically
  sonarr:
    image: lscr.io/linuxserver/sonarr:latest
    container_name: sonarr
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
    volumes:
      - ${BASE_PATH}/sonarr/config:/config:z
      - ${MEDIA_SHARE}:/data:z #Access to the entire /media
    ports:
      - 8989:8989
    networks:
      - proxy
    restart: unless-stopped

  #Jellyseerr - allows users to request media on their own
  jellyseerr:
    image: fallenbagel/jellyseerr:latest
    container_name: jellyseerr
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
    volumes:
      - ${BASE_PATH}/jellyseerr/config:/app/config:z
      - ${MEDIA_SHARE}:/data:z #Access to the entire ${BASE_PATH}
    networks:
      - proxy
    ports:
      - 5055:5055
    restart: unless-stopped

  #Transmission - torrenting software
  #
  #You can also use RuTorrent, qBittorrent or Deluge
  transmission:
    image: linuxserver/transmission:4.0.5
    container_name: transmission
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - TZ=${TZ}
    ports:
      - 9091:9091
      - 51413:51413
    volumes:
      - ${BASE_PATH}/transmission/config:/config:z
      - ${MEDIA_SHARE}:/data:z
    restart: unless-stopped

  #Wizarr - Allows you to create a share link that you can send to users to invite them to your media server
  wizarr:
    container_name: wizarr
    image: ghcr.io/wizarrrr/wizarr:latest
    ports:
      - 5690:5690
    volumes:
      - ${BASE_PATH}/wizarr/data/database:/data/database:z
    networks:
      - proxy

#volumes:
#  media:
#    driver: local-persist
#    driver_opts:
#      mountpoint: /media/HDD8
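The commented-out volumes block relies on the third-party local-persist volume plugin. Where that plugin isn't installed, a bind-style volume on Docker's built-in local driver is one alternative; a sketch using the same /media/HDD8 mountpoint:

volumes:
  media:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /media/HDD8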

mqtt/docker-compose.yml (new file, 13 lines)

@@ -0,0 +1,13 @@
services:
  mosquitto:
    image: eclipse-mosquitto
    container_name: mosquitto
    volumes:
      - ./config:/mosquitto/config
      - ./data:/mosquitto/data
      - ./log:/mosquitto/log
    ports:
      - 1883:1883
      - 9001:9001
    stdin_open: true
    tty: true
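The eclipse-mosquitto image reads its configuration from /mosquitto/config/mosquitto.conf, which with the mounts above is ./config/mosquitto.conf on the host. A minimal sketch that matches the mounted data/log paths and both published ports; allow_anonymous true assumes a trusted LAN, otherwise add a password_file:

persistence true
persistence_location /mosquitto/data/
log_dest file /mosquitto/log/mosquitto.log

listener 1883

listener 9001
protocol websockets

allow_anonymous true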


@@ -0,0 +1,34 @@
version: '2'

volumes:
  nextcloud:
  db:

services:
  db:
    image: mariadb:10.6
    restart: always
    user: "www-data:www-data"
    command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW --skip-grant-tables
    volumes:
      - db:/var/lib/mysql
    environment:
      - MYSQL_ROOT_PASSWORD=root
      - MYSQL_PASSWORD=20302233Qq
      - MYSQL_DATABASE=ncdb
      - MYSQL_USER=nextcloud
      - MYSQL_HOST=db

  app:
    image: nextcloud:30
    restart: always
    user: "www-data:www-data"
    ports:
      - 8083:80
    links:
      - db
    volumes:
      - nextcloud:/var/www/html:rw
      - /media/HDD4/Nextcloud/data:/var/www/html/data:rw
    environment:
      - OVERWRITEPROTOCOL=https
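The official nextcloud image can pre-fill the installer's database settings through MYSQL_* environment variables. A sketch that reuses the values already defined on the db service above (whether to duplicate the password here is a judgment call):

  app:
    environment:
      - OVERWRITEPROTOCOL=https
      - MYSQL_HOST=db
      - MYSQL_DATABASE=ncdb
      - MYSQL_USER=nextcloud
      - MYSQL_PASSWORD=20302233Qq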

rsshub/docker-compose.yml (new file, 53 lines)

@@ -0,0 +1,53 @@
services:
  rsshub:
    # two ways to enable puppeteer:
    # * comment out marked lines, then use this image instead: diygod/rsshub:chromium-bundled
    # * (consumes more disk space and memory) leave everything unchanged
    image: diygod/rsshub:chromium-bundled
    restart: always
    ports:
      - "1200:1200"
    environment:
      NODE_ENV: production
      CACHE_TYPE: redis
      REDIS_URL: "redis://redis:6379/"
      TWITTER_USERNAME: WinnifredWhite5@outlook.com
      TWITTER_PASSWORD: "Þ¨ºqÉè;uð˦ª&ºæ"
      YOUTUBE_KEY: AIzaSyDj2QTT4vVTZVcxsdnOh9QBInMOJjlVuaA
      # PUPPETEER_WS_ENDPOINT: "ws://browserless:3000" # marked
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:1200/healthz"]
      interval: 30s
      timeout: 10s
      retries: 3
    depends_on:
      - redis
      # - browserless # marked

  #browserless: # marked
  #  image: browserless/chrome # marked
  #  restart: always # marked
  #  ulimits: # marked
  #    core: # marked
  #      hard: 0 # marked
  #      soft: 0 # marked
  #  healthcheck:
  #    test: ["CMD", "curl", "-f", "http://localhost:3000/pressure"]
  #    interval: 30s
  #    timeout: 10s
  #    retries: 3

  redis:
    image: redis:alpine
    restart: always
    volumes:
      - redis-data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 5s

volumes:
  redis-data:
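Per the comments above, the alternative puppeteer setup runs a separate browserless container instead of the chromium-bundled image. A sketch of that variant, assembled from the marked lines (switching to the non-bundled diygod/rsshub image):

  rsshub:
    image: diygod/rsshub
    environment:
      PUPPETEER_WS_ENDPOINT: "ws://browserless:3000"
    depends_on:
      - redis
      - browserless

  browserless:
    image: browserless/chrome
    restart: always
    ulimits:
      core:
        hard: 0
        soft: 0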


@@ -0,0 +1,95 @@
version: "3"

services:
  web:
    image: registry.activitypub.software/transfem-org/sharkey:latest
    #build: .
    restart: always
    links:
      - db
      - redis
      # - mcaptcha
      # - meilisearch
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    ports:
      - "3001:3001"
    networks:
      - shonk
    volumes:
      - /media/HDD4/Sharekey:/sharkey/files:z
      - .config:/sharkey/.config:z

  redis:
    restart: always
    image: redis:7-alpine
    networks:
      - shonk
    volumes:
      - ./redis:/data:z
    healthcheck:
      test: "redis-cli ping"
      interval: 5s
      retries: 20

  db:
    restart: always
    image: postgres:15-alpine
    networks:
      - shonk
    env_file: .config/docker.env
    volumes:
      - ./db:/var/lib/postgresql/data:z
    healthcheck:
      test: "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"
      interval: 5s
      retries: 20

#  mcaptcha:
#    restart: always
#    image: mcaptcha/mcaptcha:latest
#    networks:
#      internal_network:
#      external_network:
#        aliases:
#          - localhost
#    ports:
#      - 7493:7493
#    env_file:
#      - .config/docker.env
#    environment:
#      PORT: 7493
#      MCAPTCHA_redis_URL: "redis://mcaptcha_redis/"
#    depends_on:
#      db:
#        condition: service_healthy
#      mcaptcha_redis:
#        condition: service_healthy
#
#  mcaptcha_redis:
#    image: mcaptcha/cache:latest
#    networks:
#      - internal_network
#    healthcheck:
#      test: "redis-cli ping"
#      interval: 5s
#      retries: 20

#  meilisearch:
#    restart: always
#    image: getmeili/meilisearch:v1.3.4
#    environment:
#      - MEILI_NO_ANALYTICS=true
#      - MEILI_ENV=production
#      - MEILI_MASTER_KEY=ChangeThis
#    networks:
#      - shonk
#    volumes:
#      - ./meili_data:/meili_data

networks:
  shonk:
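The db service reads .config/docker.env, and its healthcheck expects POSTGRES_USER and POSTGRES_DB to be set there. A minimal sketch with placeholder values (the real file also holds Sharkey's own settings, omitted here):

POSTGRES_USER=sharkey
POSTGRES_PASSWORD=CHANGE_ME
POSTGRES_DB=sharkey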


@@ -0,0 +1,43 @@
version: '3'

services:
  viewtube:
    restart: unless-stopped
    # Or use mauriceo/viewtube:dev for the development version
    image: mauriceo/viewtube:dev
    # ViewTube will not start until the database and redis are ready
    depends_on:
      - viewtube-mongodb
      - viewtube-redis
    # Make sure all services are in the same network
    networks:
      - viewtube
    volumes:
      # This will map ViewTube's data directory to the local folder ./data/viewtube/
      - ./data/viewtube:/data
    environment:
      - VIEWTUBE_YOUTUBE_COOKIE=__Secure-3PAPISID=fyyI_eKLu_-1X8i3/An-I4Aj_nCKuFCpIi; SID=g.a000mwhAsQrnPQ98o9xb4TVj9AKWYWozHuUe6fKCQI2rFngmuq77r7h7mnKiQA_9-3RXyH4QawACgYKAdkSARQSFQHGX2MiDDf5_IgydjsrpQqSaHyhzxoVAUF8yKo_VKlXUj09dGIEyDUOlrhU0076; APISID=nI7yRerUOKgjvoZV/Ax3kLSKRoxcuUyYIJ; SAPISID=fyyI_eKLu_-1X8i3/An-I4Aj_nCKuFCpIi; __Secure-1PAPISID=fyyI_eKLu_-1X8i3/An-I4Aj_nCKuFCpIi; SIDCC=AKEyXzVJdHtcVFSDNvQG3sulvLIlJP1jYT4Idn6JbVgyEHZ_xBIjlht_FpbSzZEf9gOFtqW7gZY; PREF=f7=100&tz=UTC&f6=400
      - VIEWTUBE_ADMIN_USER=CakesTwix
      - VIEWTUBE_DATABASE_HOST=viewtube-mongodb
      - VIEWTUBE_REDIS_HOST=viewtube-redis
    ports:
      - 8066:8066

  viewtube-mongodb:
    restart: unless-stopped
    image: mongo:4.4
    networks:
      - viewtube
    volumes:
      - ./data/db:/data/db

  viewtube-redis:
    restart: unless-stopped
    image: redis:7
    networks:
      - viewtube
    volumes:
      - ./data/redis:/data

networks:
  viewtube:
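The session cookie and admin settings sit inline in the compose file; the immich and sharkey stacks in this same commit keep such values in env files instead, and the same pattern would work here. A sketch, with ./viewtube.env as an assumed file name and the VIEWTUBE_* lines moved into it verbatim:

  viewtube:
    env_file:
      - ./viewtube.env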