ADRs: - 0006 Embedding-Modell-Migration v3->v4 (#123) - 0008 DDD-Lightweight-Migration (#136) Analysen: - ddd-bewertung.md (1237 Zeilen) — vollstaendige DDD-Analyse mit Tages-Roadmap - protokoll-parser-v6-machbarkeit.md (418 Zeilen) — #106 Phase 2 Vorbereitung Reference: - zugriffsrechte.md — 63 Routes x 3 User-Status, UI-Sichtbarkeits-Matrix Ops: - scripts/deploy.sh — mit Uptime-Kuma-Wartungsmodus (#149) - scripts/run-digest.sh — taeglicher Mail-Digest-Cron - scripts/run-monitoring-scan.sh — Monitoring-Scan-Cron (noch nicht aktiv) - scripts/smoke-test.sh — Gesamt-Funktionspruefung - pytest.ini: integration/slow/e2e Markers, addopts not-integration Tests/integration/: Live-Adapter-Tests + Frontend-XRef + Citation-Substring + Wahlprogramm-Indexed (4 Live-Test-Suites, marker-opt-in) Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
86 lines
2.5 KiB
Bash
Executable File
86 lines
2.5 KiB
Bash
Executable File
#!/bin/bash
# Deploy script with Uptime-Kuma maintenance mode
# Usage: ./scripts/deploy.sh [files...]
# Without arguments: deploy everything
#
# Puts the GWÖ monitor in Uptime Kuma into maintenance mode,
# deploys, then re-enables the monitor.
#
# Requires: UPTIME_KUMA_USER + UPTIME_KUMA_PASS in ~/.env or as ENV

# Strict mode: abort on command errors, unset variables,
# and failures in any pipeline stage.
set -euo pipefail
# Resolve the absolute script/project directories regardless of CWD.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
readonly SCRIPT_DIR PROJECT_DIR

# Deployment constants — readonly so later code cannot clobber them.
readonly SERVER="vserver"
readonly REMOTE_DIR="/opt/gwoe-antragspruefer"
readonly UPTIME_KUMA_URL="https://status.toppyr.de"
readonly MONITOR_ID=9   # GWÖ-Antragsprüfer monitor in Uptime Kuma

# Load credentials (UPTIME_KUMA_USER / UPTIME_KUMA_PASS) from the user's
# env file if present; already-exported ENV values also work.
# shellcheck disable=SC1090 -- user-provided file, not resolvable statically
if [ -f "$HOME/.env" ]; then
  source "$HOME/.env"
fi

cd "$PROJECT_DIR" || exit 1
echo "=== GWÖ-Antragsprüfer Deploy ==="

# 1. Put the Uptime Kuma monitor into maintenance (paused) mode so the
#    deploy downtime does not trigger an outage alert.
if [ -n "${UPTIME_KUMA_USER:-}" ] && [ -n "${UPTIME_KUMA_PASS:-}" ]; then
  echo "⏸ Setze Monitor auf Wartung..."
  # Pass URL/credentials/monitor id through the environment instead of
  # interpolating them into the Python source: a value containing a quote
  # would otherwise break the -c script — or inject arbitrary Python.
  KUMA_URL="$UPTIME_KUMA_URL" KUMA_USER="$UPTIME_KUMA_USER" \
  KUMA_PASS="$UPTIME_KUMA_PASS" KUMA_ID="$MONITOR_ID" python3 -c "
import os
from uptime_kuma_api import UptimeKumaApi
api = UptimeKumaApi(os.environ['KUMA_URL'])
api.login(os.environ['KUMA_USER'], os.environ['KUMA_PASS'])
api.pause_monitor(int(os.environ['KUMA_ID']))
api.disconnect()
print(' Monitor pausiert')
" 2>/dev/null || echo " (Uptime Kuma nicht erreichbar, überspringe)"
  # stderr suppressed intentionally: an unreachable Kuma must not block deploys
else
  echo "⚠ UPTIME_KUMA_USER/PASS nicht gesetzt, überspringe Wartungsmodus"
fi
# 2. Build + Deploy
# Use mktemp instead of a fixed /tmp name (predictable temp paths are a
# classic local-attack surface) and clean the archive up on any exit path.
PKG="$(mktemp /tmp/gwoe-deploy.XXXXXX)"
trap 'rm -f -- "$PKG"' EXIT

if [ $# -gt 0 ]; then
  # Only the requested files ($* joins args for display; "$@" keeps them
  # as separate words for tar; -- protects names starting with '-')
  echo "📦 Packe: $*"
  tar czf "$PKG" -- "$@"
else
  # Whole project, minus runtime data and secrets
  echo "📦 Packe gesamtes Projekt (ohne venv/data/reports)..."
  tar czf "$PKG" \
    --exclude='venv' --exclude='__pycache__' \
    --exclude='data' --exclude='reports' --exclude='.env' .
fi

echo "🚀 Upload + Build..."
# Remote path stays fixed so the ssh command can reference it by name.
scp "$PKG" "$SERVER:/tmp/gwoe-deploy.tar.gz"
ssh "$SERVER" "cd $REMOTE_DIR && tar xzf /tmp/gwoe-deploy.tar.gz && docker compose up -d --build" 2>&1 | tail -5
# 3. Wait for the app to report healthy again (max ~30 attempts,
#    1s pause between attempts, 3s curl timeout each).
echo "⏳ Warte auf Health-Check..."
healthy=0
code="000"
for i in $(seq 1 30); do
  code=$(curl -sS -o /dev/null -w "%{http_code}" --max-time 3 "https://gwoe.toppyr.de/health" 2>/dev/null || echo "000")
  if [ "$code" = "200" ]; then
    echo "✅ Health OK nach ${i}s"
    healthy=1
    break
  fi
  sleep 1
done
# Previously the loop fell through silently; surface a timeout explicitly
# so a broken deploy is visible in the log (monitor re-activation below
# still runs, matching the original control flow).
if [ "$healthy" -ne 1 ]; then
  echo "⚠ Health-Check nach 30 Versuchen nicht OK (letzter Status: $code)" >&2
fi
# 4. Re-enable the Uptime Kuma monitor after the deploy.
if [ -n "${UPTIME_KUMA_USER:-}" ] && [ -n "${UPTIME_KUMA_PASS:-}" ]; then
  echo "▶ Reaktiviere Monitor..."
  # Credentials go through the environment, not string interpolation —
  # same quoting/injection hardening as the pause step.
  KUMA_URL="$UPTIME_KUMA_URL" KUMA_USER="$UPTIME_KUMA_USER" \
  KUMA_PASS="$UPTIME_KUMA_PASS" KUMA_ID="$MONITOR_ID" python3 -c "
import os
from uptime_kuma_api import UptimeKumaApi
api = UptimeKumaApi(os.environ['KUMA_URL'])
api.login(os.environ['KUMA_USER'], os.environ['KUMA_PASS'])
api.resume_monitor(int(os.environ['KUMA_ID']))
api.disconnect()
print(' Monitor aktiv')
" 2>/dev/null || echo " (Uptime Kuma nicht erreichbar)"
  # stderr suppressed intentionally: resume failure must not fail the deploy
fi

echo "=== Deploy abgeschlossen ==="