Compare commits

...

24 Commits

Author SHA1 Message Date
8c402e3084 gitea/peertube runner foundations. secrets mgmt is a blocker. Adding that to my shell framework and will vendor that framework into here. 2025-07-15 10:53:19 -05:00
b4b36b6084 don't need that stuff in git. 2025-07-11 10:56:37 -05:00
57ec164675 all fixed 2025-07-11 10:53:35 -05:00
3fda905bcd digital workbench of my dreams finally coming together. selfstack has been on my todo list since last november. 2025-07-11 10:45:45 -05:00
a2ee7ace3e getting the selfstack fully operational. finally! 2025-07-11 00:11:39 -05:00
029147a0c1 . 2025-07-11 00:03:32 -05:00
9cebc137af i eat an incredibly routine/consistent/simple diet. cleanslate is overkill. de-scoped. 2025-07-11 00:01:27 -05:00
de4bd831f4 will experiment more with this tomorrow, but i am close. 2025-07-10 23:45:05 -05:00
3d382abf06 apple health data will soon be out in the world... 2025-07-10 23:42:11 -05:00
9e98162d2d i'm eager to bring youtube videos into audio book shelf as part of the TSYS reference library 2025-07-10 23:24:21 -05:00
865e1cc036 roll up commit 2025-07-10 22:56:58 -05:00
749fccbac7 because immich api key 2025-07-10 14:02:15 -05:00
06847733ff secrets mgmt coming soon 2025-07-10 14:01:31 -05:00
8e620de1f5 new day new app 2025-07-10 09:44:49 -05:00
b0389e0d9c finally getting my personal digital workbench setup. it's nice. 2025-07-02 08:31:03 -05:00
localuser
0b48b5c847 test 2025-06-26 11:50:25 -05:00
059bfb8c33 fitness... 2025-06-26 09:20:12 -05:00
99776f1bc8 docs 2025-06-26 09:07:01 -05:00
12baba3003 vscode server no longer needed 2025-06-26 08:46:36 -05:00
8d844b2ae8 server name change 2025-06-26 08:36:23 -05:00
7d09a4e0b1 My server, my way. 2025-06-25 10:57:55 -05:00
eb2f116d7b closer to a full instrumentation stack 2024-12-07 17:05:02 -06:00
7a29dbd3c7 . 2024-12-07 14:54:34 -06:00
a0395996dc good save point 2024-12-07 14:54:27 -06:00
33 changed files with 517 additions and 376 deletions

View File

@@ -2,22 +2,23 @@
 ## Introduction
-This repo is for containers running on Charles (@ReachableCEO) laptop
+This repo is for containers running on Charles (@ReachableCEO) workstation (CharlesDevServer)
+It is getting dialed in over time; this is the current snapshot as of 7/2/2025.
 ## Portmap
 | application                                        | port |
-| ------------------------------------------------- | ---- |
+|----------------------------------------------------|------|
 | (timetracking) wakaapi                             | 2001 |
 | (analytics) metrics                                | 2002 |
-| (workout tracking) wger                            | 2003 |
+| (analytics) shell history (atuin)                  | 2003 |
 | (youtube archiving) tube-archivist                 | 2004 |
 | (dashboard to all my local hosted apps) easy-gate  | 2005 |
-| (the only editor) vscode-server                    | 2006 |
+| apple health export to influx/grafana              | 2006 |
 | (resume hacking) reactive-resume                   | 2007 |
 | (habit tracking) atomichabits                      | 2008 |
-| (workout tracking) wael                            | 2009 |
-| (food tracking) cleanslate                         | 2010 |
+| kiwix                                              | 2009 |
 ## Secrets

View File

@@ -1,90 +0,0 @@
services:
  cleanslate-database:
    image: postgres:15
    restart: always
    container_name: cleanslate-database
    ports:
      - '35000:5432'
    environment:
      POSTGRES_PASSWORD: 1234
    volumes:
      - cleanslate-database:/var/lib/postgresql/data
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready -U postgres']
      interval: 10s
      timeout: 5s
      retries: 5
  cleanslate-graphql-server:
    image: hasura/graphql-engine:latest.cli-migrations-v3
    restart: always
    container_name: cleanslate-graphql-server
    ports:
      - '35001:8080'
    environment:
      HASURA_GRAPHQL_ADMIN_SECRET: ${HASURA_GRAPHQL_ADMIN_SECRET}
      HASURA_GRAPHQL_CORS_DOMAIN: https://${NEXT_PUBLIC_HASURA_DOMAIN}
      HASURA_GRAPHQL_DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD}@cleanslate-database:35000/postgres
      HASURA_GRAPHQL_DEV_MODE: false
      HASURA_GRAPHQL_ENABLE_CONSOLE: true
      HASURA_GRAPHQL_ENABLED_LOG_TYPES: startup, http-log, webhook-log, websocket-log, query-log
      HASURA_GRAPHQL_JWT_SECRET: ${HASURA_GRAPHQL_JWT_SECRET}
    volumes:
      - cleanslate-migrations:/hasura-migrations
      - cleanslate-metadata:/hasura-metadata
    depends_on:
      cleanslate-database:
        condition: service_healthy
    extra_hosts:
      - 'host.docker.internal:host-gateway'
  cleanslate-authentication-server:
    image: ghcr.io/successible/cleanslate/authentication-server:latest
    build:
      context: .
      dockerfile: Dockerfile.server
    pull_policy: ${PULL_POLICY:-always}
    restart: always
    container_name: authentication-server
    ports:
      - '35002:3001'
    environment:
      HASURA_GRAPHQL_ADMIN_SECRET: ${HASURA_GRAPHQL_ADMIN_SECRET}
      JWT_SIGNING_SECRET: ${JWT_SIGNING_SECRET}
      NEXT_PUBLIC_USE_FIREBASE: ${NEXT_PUBLIC_USE_FIREBASE:-false}
    depends_on:
      - cleanslate-database
      - cleanslate-graphql-server
    extra_hosts:
      - 'host.docker.internal:host-gateway'
  cleanslate-client:
    image: ghcr.io/successible/cleanslate/client:latest
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - NEXT_PUBLIC_FIREBASE_CONFIG=${NEXT_PUBLIC_FIREBASE_CONFIG:-{}}
        - NEXT_PUBLIC_LEGAL_LINK=${NEXT_PUBLIC_LEGAL_LINK:-no}
        - NEXT_PUBLIC_LOGIN_WITH_APPLE=${NEXT_PUBLIC_LOGIN_WITH_APPLE:-no}
        - NEXT_PUBLIC_LOGIN_WITH_FACEBOOK=${NEXT_PUBLIC_LOGIN_WITH_FACEBOOK:-no}
        - NEXT_PUBLIC_LOGIN_WITH_GITHUB=${NEXT_PUBLIC_LOGIN_WITH_GITHUB:-no}
        - NEXT_PUBLIC_LOGIN_WITH_GOOGLE=${NEXT_PUBLIC_LOGIN_WITH_GOOGLE:-no}
        - NEXT_PUBLIC_REACT_SENTRY_DSN=${NEXT_PUBLIC_REACT_SENTRY_DSN:-no}
        - NEXT_PUBLIC_USE_FIREBASE=${NEXT_PUBLIC_USE_FIREBASE:-false}
        - NEXT_PUBLIC_VERSION=${NEXT_PUBLIC_VERSION}
    pull_policy: ${PULL_POLICY:-always}
    restart: always
    container_name: client
    ports:
      - '2010:3000'
    depends_on:
      - cleanslate-database
      - cleanslate-graphql-server
    extra_hosts:
      - 'host.docker.internal:host-gateway'
volumes:
  cleanslate-database:
  cleanslate-migrations:
  cleanslate-metadata:

View File

@@ -1,35 +0,0 @@
# compose.yaml
services:
  influxdb2:
    image: influxdb:2
    container_name: reachableceo-influxdb
    ports:
      - 4000:8086
    environment:
      DOCKER_INFLUXDB_INIT_MODE: setup
      DOCKER_INFLUXDB_INIT_USERNAME_FILE: /run/secrets/influxdb2-admin-username
      DOCKER_INFLUXDB_INIT_PASSWORD_FILE: /run/secrets/influxdb2-admin-password
      DOCKER_INFLUXDB_INIT_ADMIN_TOKEN_FILE: /run/secrets/influxdb2-admin-token
      DOCKER_INFLUXDB_INIT_ORG: docs
      DOCKER_INFLUXDB_INIT_BUCKET: home
    secrets:
      - influxdb2-admin-username
      - influxdb2-admin-password
      - influxdb2-admin-token
    volumes:
      - type: volume
        source: influxdb2-data
        target: /var/lib/influxdb2
      - type: volume
        source: influxdb2-config
        target: /etc/influxdb2
secrets:
  influxdb2-admin-username:
    file: $HOME/.env/influxdb2/admin-username
  influxdb2-admin-password:
    file: $HOME/.env/influxdb2/admin-password
  influxdb2-admin-token:
    file: $HOME/.env/influxdb2/admin-token
volumes:
  influxdb2-data:
  influxdb2-config:

inprep/applehealth-grafana/.gitignore vendored Executable file
View File

@@ -0,0 +1,129 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/

View File

@@ -0,0 +1,6 @@
FROM python:3.9
WORKDIR /app
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY . .
CMD ["python", "app.py"]

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021 Ivaylo Pavlov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1 @@
A step by step setup guide is available in this [blog post on Ivo's Blog](https://www.ivaylopavlov.com/charting-apple-healthkit-data-in-grafana/).

inprep/applehealth-grafana/app.py Executable file
View File

@@ -0,0 +1,107 @@
import json
import sys
import socket
import logging
from datetime import datetime
from flask import request, Flask
from influxdb import InfluxDBClient
from geolib import geohash

# Max points per InfluxDB write call
DATAPOINTS_CHUNK = 80000

logger = logging.getLogger("console-output")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

app = Flask(__name__)
app.debug = True

client = InfluxDBClient(host='localhost', port=20007)
client.create_database('db')
client.switch_database('db')


@app.route('/collect', methods=['POST', 'GET'])
def collect():
    logger.info("Request received")
    healthkit_data = None
    transformed_data = []
    try:
        healthkit_data = json.loads(request.data)
    except ValueError:
        return "Invalid JSON Received", 400
    try:
        logger.info("Ingesting Metrics")
        for metric in healthkit_data.get("data", {}).get("metrics", []):
            number_fields = []
            string_fields = []
            for datapoint in metric["data"]:
                metric_fields = set(datapoint.keys())
                metric_fields.remove("date")
                # Split each datapoint into numeric fields and string tags
                for mfield in metric_fields:
                    if isinstance(datapoint[mfield], (int, float)):
                        number_fields.append(mfield)
                    else:
                        string_fields.append(mfield)
                point = {
                    "measurement": metric["name"],
                    "time": datapoint["date"],
                    "tags": {str(sfield): str(datapoint[sfield]) for sfield in string_fields},
                    "fields": {str(nfield): float(datapoint[nfield]) for nfield in number_fields}
                }
                transformed_data.append(point)
                number_fields.clear()
                string_fields.clear()
        logger.info("Data Transformation Complete")
        logger.info(f"Number of data points to write: {len(transformed_data)}")
        logger.info("DB Write Started")
        for i in range(0, len(transformed_data), DATAPOINTS_CHUNK):
            logger.info("DB Writing chunk")
            client.write_points(transformed_data[i:i + DATAPOINTS_CHUNK])
        logger.info("DB Metrics Write Complete")
        logger.info("Ingesting Workouts Routes")
        transformed_workout_data = []
        for workout in healthkit_data.get("data", {}).get("workouts", []):
            tags = {
                "id": workout["name"] + "-" + workout["start"] + "-" + workout["end"]
            }
            for gps_point in workout["route"]:
                point = {
                    "measurement": "workouts",
                    "time": gps_point["timestamp"],
                    "tags": tags,
                    "fields": {
                        "lat": gps_point["lat"],
                        "lng": gps_point["lon"],
                        "geohash": geohash.encode(gps_point["lat"], gps_point["lon"], 7),
                    }
                }
                transformed_workout_data.append(point)
        for i in range(0, len(transformed_workout_data), DATAPOINTS_CHUNK):
            logger.info("DB Writing chunk")
            client.write_points(transformed_workout_data[i:i + DATAPOINTS_CHUNK])
        logger.info("Ingesting Workouts Complete")
    except Exception:
        logger.exception("Caught Exception. See stacktrace for details.")
        return "Server Error", 500
    return "Success", 200


if __name__ == "__main__":
    hostname = socket.gethostname()
    ip_address = socket.gethostbyname(hostname)
    logger.info(f"Local Network Endpoint: http://{ip_address}/collect")
    app.run(host='0.0.0.0', port=5353)

View File

@@ -0,0 +1,19 @@
services:
  web:
    build: .
    ports:
      - 2006:2007   # note: app.py listens on 5353, so this mapping may need to be 2006:5353
    depends_on:
      - influxdbV1
    volumes:
      - .:/app
  influxdbV1:
    image: influxdb:1.8.4
    container_name: reachableceo-health-data
    ports:
      - "20007:8086"
    volumes:
      - reachableceo-health-data:/var/lib/influxdb
    restart: unless-stopped
volumes:
  reachableceo-health-data:

View File

@@ -0,0 +1,3 @@
flask
influxdb
geolib

View File

@@ -0,0 +1,37 @@
{
  "data" : {
    "workouts" : [],
    "metrics" : [
      {
        "data" : [
          {
            "qty" : 34,
            "date" : "2021-03-14 00:01:00 +0000"
          },
          {
            "date" : "2021-03-14 00:51:00 +0000",
            "qty" : 25
          },
          {
            "qty" : 8.2796763649358063,
            "date" : "2021-03-14 01:13:00 +0000"
          },
          {
            "date" : "2021-03-14 01:14:00 +0000",
            "qty" : 17.719295979506633
          },
          {
            "date" : "2021-03-14 01:15:00 +0000",
            "qty" : 17.719295979506636
          },
          {
            "date" : "2021-03-14 01:16:00 +0000",
            "qty" : 7.2817316760509208
          }
        ],
        "name" : "step_count",
        "units" : "count"
      }
    ]
  }
}
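
To sanity-check the collector end to end, the sample payload above can be POSTed straight at the /collect route — a sketch, assuming app.py is running locally on its default port 5353 and the payload is saved as sample.json:

```bash
# replay the sample Health Auto Export payload against the collector
curl -X POST \
  -H 'Content-Type: application/json' \
  --data @sample.json \
  http://localhost:5353/collect
```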

View File

@@ -0,0 +1 @@
https://docs.gitea.com/next/usage/actions/act-runner/

View File

@@ -0,0 +1,14 @@
version: "3.8"
services:
runner:
image: docker.io/gitea/act_runner:nightly
environment:
CONFIG_FILE: /config.yaml
GITEA_INSTANCE_URL: "${INSTANCE_URL}"
GITEA_RUNNER_REGISTRATION_TOKEN: "${REGISTRATION_TOKEN}"
GITEA_RUNNER_NAME: "${RUNNER_NAME}"
GITEA_RUNNER_LABELS: "${RUNNER_LABELS}"
volumes:
- ./config.yaml:/config.yaml
- ./data:/data
- /var/run/docker.sock:/var/run/docker.sock
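
Bringing the runner up needs the four variables referenced above. A sketch — the instance URL, name, and labels are placeholders, and the registration token comes from the Gitea admin UI (see the act-runner docs linked above):

```bash
# register and start the act_runner; all values here are illustrative
export INSTANCE_URL="https://gitea.example.com"
export REGISTRATION_TOKEN="<token from the Gitea admin UI>"
export RUNNER_NAME="charlesdevserver-runner"
export RUNNER_LABELS="ubuntu-latest:docker://node:20-bookworm"
docker compose up -d
```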

View File

@@ -0,0 +1,13 @@
services:
  kiwix-serve:
    ports:
      - 2013:8080
    image: ghcr.io/kiwix/kiwix-serve:latest
    # uncomment next 4 lines to use it with local zim file in /tmp/zim
    # volumes:
    #   - /tmp/zim:/data
    # command:
    #   - '*.zim'
    # uncomment next 2 lines to use it with remote zim file
    # environment:
    #   - 'DOWNLOAD=https://download.kiwix.org/zim/wikipedia_bm_all.zim'
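
For the local-ZIM variant the comments describe, something like this should work — a sketch; the archive name is taken from the comment above, and any ZIM from download.kiwix.org will do:

```bash
# fetch a ZIM archive into the bind-mounted directory, then start kiwix-serve
mkdir -p /tmp/zim
wget -P /tmp/zim https://download.kiwix.org/zim/wikipedia_bm_all.zim
docker compose up -d   # with the four "local zim" lines uncommented
```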

View File

@@ -1,11 +1,10 @@
 services:
-  metrics:
+  reachableceo-metrics:
     image: ghcr.io/lowlighter/metrics:latest
     container_name: reachableceo-metrics
-    entrypoint: ""
+    entrypoint: [""]
     command: ["npm", "start"]
     ports:
-      - 2002:3000
+      - "0.0.0.0:2002:3000"
     volumes:
-      - ./settings.json:/metrics/settings.json
+      - "./settings.json:/metrics/settings.json"
-    restart: no

View File

@@ -2,7 +2,7 @@
"//": "Example of configuration for metrics web instance", "//": "Example of configuration for metrics web instance",
"//": "====================================================================", "//": "====================================================================",
"token": "A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", "//": "GitHub Personal Token (required)", "token": "GITHUB API TOKEN", "//": "GitHub Personal Token (required)",
"modes": ["embed", "insights"], "//": "Web instance enabled modes", "modes": ["embed", "insights"], "//": "Web instance enabled modes",
"restricted": [], "//": "Authorized users (empty to disable)", "restricted": [], "//": "Authorized users (empty to disable)",
"maxusers": 0, "//": "Maximum users, (0 to disable)", "maxusers": 0, "//": "Maximum users, (0 to disable)",
@@ -17,8 +17,8 @@
"padding": ["0", "8 + 11%"], "//": "Image padding (default)", "padding": ["0", "8 + 11%"], "//": "Image padding (default)",
"outputs": ["svg", "png", "json"], "//": "Image output formats (empty to enable all)", "outputs": ["svg", "png", "json"], "//": "Image output formats (empty to enable all)",
"hosted": { "hosted": {
"by": "ReachableCEO Dev Workstation", "//": "Web instance host (displayed in footer)", "by": "", "//": "Web instance host (displayed in footer)",
"link": "https://www.reachableceo.com", "//": "Web instance host link (displayed in footer)" "link": "", "//": "Web instance host link (displayed in footer)"
}, },
"oauth":{ "oauth":{
"id": null, "//": "GitHub OAUTH client id", "id": null, "//": "GitHub OAUTH client id",
@@ -68,80 +68,80 @@
"enabled": false, "//": "Enable isocalendar plugin" "enabled": false, "//": "Enable isocalendar plugin"
}, },
"languages":{ "languages":{
"enabled": true, "//": "Enable languages plugin" "enabled": false, "//": "Enable languages plugin"
}, },
"stargazers":{ "stargazers":{
"worldmap.token": null, "//": "Stargazers worldmap token", "worldmap.token": null, "//": "Stargazers worldmap token",
"enabled": false, "//": "Enable stargazers plugin" "enabled": false, "//": "Enable stargazers plugin"
}, },
"lines":{ "lines":{
"enabled": true, "//": "Enable lines plugin" "enabled": false, "//": "Enable lines plugin"
}, },
"topics":{ "topics":{
"enabled": true, "//": "Enable topics plugin" "enabled": false, "//": "Enable topics plugin"
}, },
"stars":{ "stars":{
"enabled": true, "//": "Enable stars plugin" "enabled": false, "//": "Enable stars plugin"
}, },
"licenses":{ "licenses":{
"enabled": true, "//": "Enable licenses plugin" "enabled": false, "//": "Enable licenses plugin"
}, },
"habits":{ "habits":{
"enabled": true, "//": "Enable habits plugin" "enabled": false, "//": "Enable habits plugin"
}, },
"contributors":{ "contributors":{
"enabled": true, "//": "Enable contributors plugin" "enabled": false, "//": "Enable contributors plugin"
}, },
"followup":{ "followup":{
"enabled": true, "//": "Enable followup plugin" "enabled": false, "//": "Enable followup plugin"
}, },
"reactions":{ "reactions":{
"enabled": true, "//": "Enable reactions plugin" "enabled": false, "//": "Enable reactions plugin"
}, },
"people":{ "people":{
"enabled": true, "//": "Enable people plugin" "enabled": false, "//": "Enable people plugin"
}, },
"sponsorships":{ "sponsorships":{
"enabled": true, "//": "Enable sponsorships plugin" "enabled": false, "//": "Enable sponsorships plugin"
}, },
"sponsors":{ "sponsors":{
"enabled": true, "//": "Enable sponsors plugin" "enabled": false, "//": "Enable sponsors plugin"
}, },
"repositories":{ "repositories":{
"enabled": true, "//": "Enable repositories plugin" "enabled": false, "//": "Enable repositories plugin"
}, },
"discussions":{ "discussions":{
"enabled": true, "//": "Enable discussions plugin" "enabled": false, "//": "Enable discussions plugin"
}, },
"starlists":{ "starlists":{
"enabled": true, "//": "Enable starlists plugin" "enabled": false, "//": "Enable starlists plugin"
}, },
"calendar":{ "calendar":{
"enabled": true, "//": "Enable calendar plugin" "enabled": false, "//": "Enable calendar plugin"
}, },
"achievements":{ "achievements":{
"enabled": true, "//": "Enable achievements plugin" "enabled": false, "//": "Enable achievements plugin"
}, },
"notable":{ "notable":{
"enabled": true, "//": "Enable notable plugin" "enabled": false, "//": "Enable notable plugin"
}, },
"activity":{ "activity":{
"enabled": true, "//": "Enable activity plugin" "enabled": false, "//": "Enable activity plugin"
}, },
"traffic":{ "traffic":{
"enabled": true, "//": "Enable traffic plugin" "enabled": false, "//": "Enable traffic plugin"
}, },
"code":{ "code":{
"enabled": true, "//": "Enable code plugin" "enabled": false, "//": "Enable code plugin"
}, },
"gists":{ "gists":{
"enabled": true, "//": "Enable gists plugin" "enabled": false, "//": "Enable gists plugin"
}, },
"projects":{ "projects":{
"enabled": true, "//": "Enable projects plugin" "enabled": false, "//": "Enable projects plugin"
}, },
"introduction":{ "introduction":{
"enabled": true, "//": "Enable introduction plugin" "enabled": false, "//": "Enable introduction plugin"
}, },
"skyline":{ "skyline":{
"enabled": false, "//": "Enable skyline plugin" "enabled": false, "//": "Enable skyline plugin"
@@ -174,12 +174,8 @@
"enabled": false, "//": "Enable rss plugin" "enabled": false, "//": "Enable rss plugin"
}, },
"wakatime":{ "wakatime":{
"token": "664dd8b2-b73a-4f31-82ef-96ed424814d2", "token": "WakaTime API token",
"enabled": true, "//": "Enable wakatime plugin" "enabled": false, "//": "Enable wakatime plugin"
"url": "http://localhost:2001/api",
"days": 7,
"sections": "time, projects, projects-graphs",
"limit": 4,
}, },
"leetcode":{ "leetcode":{
"enabled": false, "//": "Enable leetcode plugin" "enabled": false, "//": "Enable leetcode plugin"
@@ -222,4 +218,4 @@
}, },
"//": "" "//": ""
} }
} }

View File

@@ -0,0 +1,9 @@
FROM alpine:latest
RUN apk add --no-cache nodejs npm ffmpeg
RUN npm install -g @peertube/peertube-runner
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

View File

@@ -0,0 +1,21 @@
# PeerTube Runner Docker
This Docker image sets up a PeerTube runner with all necessary dependencies installed.
When the container stops, the runner is unregistered from the PeerTube instance.
The runner name is set to the container ID when run standalone, and to the node name when run in a swarm.
## Usage
1. **Environment Variables**:
- `URL`: PeerTube instance URL.
- `TOKEN`: Registration token.
2. **Run the Container**:
```bash
docker run -e URL=http://127.0.0.1:9000 -e TOKEN=ptrrt-**** peertube-runner
```
## Note
This container is meant to be a basic runner, so there is no encryption or captioning.
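
The README assumes a pre-built image; a build-and-run sketch using the Dockerfile above (tag, instance URL, and token are placeholders):

```bash
# build from the Dockerfile above, then run against a PeerTube instance
docker build -t peertube-runner .
docker run -d \
  -e URL=https://peertube.example.com \
  -e TOKEN=ptrrt-**** \
  peertube-runner
```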

View File

@@ -0,0 +1,20 @@
#!/bin/sh

# In a swarm the node name is passed in; standalone, fall back to the
# container hostname (the container ID by default).
if [ -n "$DOCKER_SERVICE_NAME" ]; then
  NAME="$NODE_NAME"
else
  NAME=$(uname -n)
fi

cleanup() {
  echo "Container is stopping, removing runner '$NAME'..."
  peertube-runner unregister --url "$URL" --runner-name "$NAME"
}

trap cleanup SIGTERM

echo "Starting runner $NAME..."
peertube-runner server & sleep 5 && \
  peertube-runner register --url "$URL" --registration-token "$TOKEN" --runner-name "$NAME" && \
wait

View File

@@ -1,4 +1,3 @@
-version: '3.8'
 services:
   tsys-atomichabits:
     image: ghcr.io/majorpeter/atomic-tracker:latest
@@ -7,7 +6,7 @@ services:
       - "2008:8080"
     volumes:
       - tsys-atomichabits:/config
-    restart: no
+    restart: always
 volumes:
   tsys-atomichabits:

View File

@@ -0,0 +1,4 @@
ATUIN_DB_NAME=atuin
ATUIN_DB_USERNAME=atuin
# Choose your own secure password
ATUIN_DB_PASSWORD=ThisIsreally-insecure123

View File

@@ -0,0 +1,2 @@
config/*
database/*

View File

@@ -0,0 +1,25 @@
services:
  atuin:
    restart: always
    image: ghcr.io/atuinsh/atuin:18.6.1
    command: server start
    volumes:
      - "./config:/config"
    links:
      - postgresql:db
    ports:
      - 2003:8888
    environment:
      ATUIN_HOST: "0.0.0.0"
      ATUIN_OPEN_REGISTRATION: "true"
      ATUIN_DB_URI: postgres://$ATUIN_DB_USERNAME:$ATUIN_DB_PASSWORD@db/$ATUIN_DB_NAME
      RUST_LOG: info,atuin_server=debug
  postgresql:
    image: postgres:14
    restart: unless-stopped
    volumes: # Don't remove permanent storage for index database files!
      - "./database:/var/lib/postgresql/data/"
    environment:
      POSTGRES_USER: ${ATUIN_DB_USERNAME}
      POSTGRES_PASSWORD: ${ATUIN_DB_PASSWORD}
      POSTGRES_DB: ${ATUIN_DB_NAME}
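
Client side, each shell host just needs to point at this server before registering — a sketch, assuming the stack above is reachable at CharlesDevServer.knel.net:2003 (the username and email are placeholders):

```bash
# point the atuin client at the self-hosted sync server
mkdir -p ~/.config/atuin
echo 'sync_address = "http://CharlesDevServer.knel.net:2003"' >> ~/.config/atuin/config.toml

atuin register -u charles -e charles@example.com   # first host; prompts for a password
atuin sync                                          # push/pull shell history
```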

View File

@@ -23,48 +23,43 @@
     {
       "name": "WAKAAPI Dashboard",
       "category": "",
-      "url": "http://tsys1:2001",
+      "url": "http://CharlesDevServer.knel.net:2001",
     },
     {
       "name": "Metrics Dashboard",
       "category": "",
-      "url": "http://tsys1:2002",
+      "url": "http://CharlesDevServer.knel.net:2002",
     },
     {
-      "name": "wger-Workout Tracking",
+      "name": "Atuin",
       "category": "",
-      "url": "http://tsys1:2003",
+      "url": "http://CharlesDevServer.knel.net:2003",
     },
     {
       "name": "Tube Archivist",
       "category": "",
-      "url": "http://tsys1:2004",
+      "url": "http://CharlesDevServer.knel.net:2004",
     },
-    {
-      "name": "VsCode Server",
-      "category": "",
-      "url": "http://tsys1:2006",
-    },
     {
       "name": "Reactive Resume",
       "category": "",
-      "url": "http://tsys1:2007",
+      "url": "http://CharlesDevServer.knel.net:2007",
     },
     {
       "name": "Atomic Habits",
       "category": "",
-      "url": "http://tsys1:2008"
+      "url": "http://CharlesDevServer.knel.net:2008"
     },
     {
-      "name": "Cleanslate",
+      "name": "Kiwix",
       "category": "",
-      "url": "http://tsys1:2010"
+      "url": "http://CharlesDevServer.knel.net:2009",
     },
     {
-      "name": "Influxdb",
+      "name": "Apple Health Exporter",
       "category": "",
-      "url": "http://tsys1:4000",
-    },
+      "url": "http://CharlesDevServer.knel.net:2010",
+    }
   ],
   "notes": [
     {

View File

@@ -6,40 +6,41 @@ services:
     ports:
       - 2004:8000
     volumes:
-      - tubearchivist-media:/youtube
-      - tubearchivist-cache:/cache
+      - media:/youtube
+      - cache:/cache
     environment:
-      - ES_URL=http://tubearchivist-es:9200    # needs protocol e.g. http and port
-      - REDIS_HOST=tubearchivist-redis         # don't add protocol
+      - ES_URL=http://archivist-es:9200        # needs protocol e.g. http and port
+      - TA_PORT=2004
+      - REDIS_CON=redis://archivist-redis:6379
       - HOST_UID=1000
       - HOST_GID=1000
-      - TA_HOST=tsys1                          # set your host name
+      - TA_HOST=http://charlesdevserver.knel.net:2004   # set your host name with protocol and port
       - TA_USERNAME=tubearchivist              # your initial TA credentials
       - TA_PASSWORD=verysecret                 # your initial TA credentials
       - ELASTIC_PASSWORD=verysecret            # set password for Elasticsearch
       - TZ=America/Chicago                     # set your time zone
     healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:2004/health"]
+      test: ["CMD", "curl", "-f", "http://localhost:8000/api/health"]
       interval: 2m
       timeout: 10s
       retries: 3
       start_period: 30s
     depends_on:
-      - tubearchivist-es
-      - tubearchivist-redis
-  tubearchivist-redis:
-    image: redis/redis-stack-server
-    container_name: tubearchivist-redis
+      - archivist-es
+      - archivist-redis
+  archivist-redis:
+    image: redis
+    container_name: archivist-redis
     restart: unless-stopped
     expose:
       - "6379"
     volumes:
-      - tubearchivist-redis:/data
+      - redis:/data
     depends_on:
-      - tubearchivist-es
-  tubearchivist-es:
-    image: bbilly1/tubearchivist-es            # only for amd64, or use official es 8.14.3
-    container_name: tubearchivist-es
+      - archivist-es
+  archivist-es:
+    image: bbilly1/tubearchivist-es            # only for amd64, or use official es 8.18.0
+    container_name: archivist-es
     restart: unless-stopped
     environment:
       - "ELASTIC_PASSWORD=verysecret"          # matching Elasticsearch password
@@ -52,12 +53,12 @@ services:
         soft: -1
         hard: -1
     volumes:
-      - tubearchivist-es:/usr/share/elasticsearch/data   # check for permission error when using bind mount, see readme
+      - es:/usr/share/elasticsearch/data                 # check for permission error when using bind mount, see readme
     expose:
       - "9200"
 volumes:
-  tubearchivist-media:
-  tubearchivist-cache:
-  tubearchivist-redis:
-  tubearchivist-es:
+  media:
+  cache:
+  redis:
+  es:

View File

@@ -1,12 +0,0 @@
services:
  tsys-vscode-server:
    image: gitpod/openvscode-server
    container_name: reachableceo-vscode-server
    restart: always
    ports:
      - 2006:3000
    volumes:
      - tsys-vscode-server:/home/workspace:cached
volumes:
  tsys-vscode-server:

View File

@@ -0,0 +1,8 @@
docker run \
-it \
-v "/home/localuser/Photos":/import:ro \
-e IMMICH_INSTANCE_URL=https://photos.knownelement.com/api \
-e IMMICH_API_KEY=8z7wpVjeEaNm1wpkY9qpKegQMCwFuOGzWTGRHzkUrs0 \
ghcr.io/immich-app/immich-cli:latest
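
The invocation above only drops into the CLI container; the upload itself is a separate command. A sketch appending the immich upload subcommand, with the API key swapped for a placeholder (the /import path is the read-only mount from the run above):

```bash
# same invocation with an upload appended; --recursive walks /import
docker run -it \
  -v "/home/localuser/Photos":/import:ro \
  -e IMMICH_INSTANCE_URL=https://photos.knownelement.com/api \
  -e IMMICH_API_KEY=<api key> \
  ghcr.io/immich-app/immich-cli:latest \
  upload --recursive /import
```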

View File

@@ -1,28 +0,0 @@
upstream wger {
    server wger-web:2003;
}

server {
    listen 80;

    location / {
        proxy_pass http://wger;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
        proxy_set_header X-Forwarded-Host $host:$server_port;
        proxy_redirect off;
    }

    location /static/ {
        alias /wger/static/;
    }

    location /media/ {
        alias /wger/media/;
    }

    # Increase max body size to allow for video uploads
    client_max_body_size 100M;
}

View File

@@ -1,116 +0,0 @@
# Please consult the `Deployment` section in the readme if you want to deploy
# this. You *need* to keep this nginx service, even if you have your own,
# otherwise the static files will not be served correctly! If you do remove
# it, configure yours similarly to what's in config/nginx.conf
# Also take a look at the "Static files" section in the .env file
services:
  wger-web:
    image: wger/server:latest
    depends_on:
      wger-db:
        condition: service_healthy
      wger-cache:
        condition: service_healthy
    #env_file:
    #  - ./config/prod.env
    volumes:
      - wger-static:/home/wger/static
    expose:
      - 2003
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:8000
      interval: 10s
      timeout: 5s
      start_period: 300s
      retries: 5
    restart: unless-stopped
  wger-nginx:
    image: nginx:stable
    depends_on:
      - wger-web
    volumes:
      - ./config/nginx.conf:/etc/nginx/conf.d/default.conf
      - wger-static:/wger/static:ro
      - wger-media:/wger/media:ro
    ports:
      - "80:80"
    healthcheck:
      test: service nginx status
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    restart: unless-stopped
  wger-db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_USER=wger
      - POSTGRES_PASSWORD=wger
      - POSTGRES_DB=wger
    volumes:
      - wger-postgres-data:/var/lib/postgresql/data/
    expose:
      - 5432
    healthcheck:
      test: pg_isready -U wger
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    restart: unless-stopped
  wger-cache:
    image: redis:latest
    expose:
      - 6379
    volumes:
      - wger-redis-data:/data
    healthcheck:
      test: redis-cli ping
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    restart: unless-stopped
  wger-celery_worker:
    image: wger/server:latest
    command: /start-worker
    #env_file:
    #  - ./config/prod.env
    volumes:
      - wger-media:/home/wger/media
    depends_on:
      wger-web:
        condition: service_healthy
    healthcheck:
      test: celery -A wger inspect ping
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
  wger-celery_beat:
    image: wger/server:latest
    command: /start-beat
    volumes:
      - wger-celery-beat:/home/wger/beat/
    #env_file:
    #  - ./config/prod.env
    depends_on:
      wger-celery_worker:
        condition: service_healthy
volumes:
  wger-postgres-data:
  wger-celery-beat:
  wger-static:
  wger-media:
  wger-redis-data:
networks:
  default:
    name: wger_network

View File

@@ -1,9 +0,0 @@
#!/bin/bash
#docker compose exec wger-web python3 manage.py sync-exercises
docker compose exec wger-web python3 manage.py download-exercise-images
docker compose exec wger-web python3 manage.py download-exercise-videos
docker compose exec wger-web wger load-online-fixtures
# afterwards:
docker compose exec wger-web python3 manage.py sync-ingredients