-rwxr-xr-x  arecordalertfailzero.sh  88
-rwxr-xr-x  backup.sh  59
-rwxr-xr-x  brains_backup.sh  105
-rwxr-xr-x  brains_backup2.sh  69
-rwxr-xr-x  check_services.sh  22
-rwxr-xr-x  clean_media.sh  4
-rwxr-xr-x  clean_orphans.sh  3
-rwxr-xr-x  clean_previewcards.sh  4
-rwxr-xr-x  copydunkadunk.sh  32
-rwxr-xr-x  dasystemisdownyo.sh  128
-rwxr-xr-x  dasystemisdownyo2.sh  135
-rwxr-xr-x  dasystemisdownyo3.sh  149
-rwxr-xr-x  dasystemisdownyo4.sh  134
-rwxr-xr-x  db2_backup.sh  69
-rwxr-xr-x  deldirectories.sh  2
-rwxr-xr-x  detonate.sh  23
-rwxr-xr-x  disk_mitigator.sh  65
-rwxr-xr-x  do_the_needful.sh  60
-rwxr-xr-x  dotheneedfuleverywhere.sh  49
-rwxr-xr-x  dr_mirror_to_linode.sh  44
-rwxr-xr-x  dr_telegram_alert.sh  19
-rwxr-xr-x  fix_queue.sh  38
-rwxr-xr-x  fix_queue2.sh  44
-rwxr-xr-x  fix_queue3.sh  41
-rwxr-xr-x  fixsudoerseverywhere.sh  33
-rwxr-xr-x  freezer.sh  20
-rwxr-xr-x  freezermove.sh  48
-rwxr-xr-x  genesis_agg.sh  44
-rwxr-xr-x  genesis_agg1.sh  52
-rwxr-xr-x  genesis_check.sh  24
-rwxr-xr-x  genesis_sync_progress.sh  25
-rwxr-xr-x  get_telegram_id.sh  6
-rwxr-xr-x  giteapushv3.sh  41
-rwxr-xr-x  hardenit.sh  56
-rwxr-xr-x  honeypot_checker.sh  41
-rwxr-xr-x  honeypot_selftest_pull.sh  47
-rwxr-xr-x  kodakmoment.sh  89
-rwxr-xr-x  kodakmomentproxmox.sh  40
-rwxr-xr-x  krang_backup.sh  32
-rwxr-xr-x  krang_modular_health.sh  112
-rwxr-xr-x  linux_masto.sh  52
-rwxr-xr-x  malips.sh  49
-rwxr-xr-x  mastodon_restart.sh  87
-rwxr-xr-x  mastodon_status-check.sh  69
-rwxr-xr-x  mastodon_token.secret  3
-rwxr-xr-x  migrationtoblock.sh  72
-rwxr-xr-x  p1.sh  74
-rwxr-xr-x  p2.sh  74
-rwxr-xr-x  perms.sh  23
-rwxr-xr-x  pull_health_everywhere_ntp.sh  101
-rwxr-xr-x  pushandbuild.sh  40
-rwxr-xr-x  restore.sh  68
-rwxr-xr-x  retention.sh  71
-rwxr-xr-x  rsync_zfs_sync_helper.sh  56
-rwxr-xr-x  run_prune_from_krang.sh  18
-rwxr-xr-x  snapshot_send_to_vault.sh  28
-rwxr-xr-x  startemup.sh  67
-rwxr-xr-x  sync-to-vault.sh  33
-rwxr-xr-x  sync-trigger.sh  48
-rwxr-xr-x  sync.sh  24
-rwxr-xr-x  sync_everything_v3.sh  91
-rwxr-xr-x  tothebank.sh  23
-rwxr-xr-x  upgrade.sh  88
-rwxr-xr-x  validate_zfs.sh  78
-rwxr-xr-x  venv-backup-script.sh  85
-rwxr-xr-x  verify_minio.sh  39
-rwxr-xr-x  verifypxe.sh  62
-rwxr-xr-x  watchdog.sh  39
-rwxr-xr-x  watchman.sh  73
-rwxr-xr-x  zfs_bootstrap.sh  50
70 files changed, 3781 insertions, 0 deletions
diff --git a/arecordalertfailzero.sh b/arecordalertfailzero.sh
new file mode 100755
index 0000000..fadeba8
--- /dev/null
+++ b/arecordalertfailzero.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+# FailZero Early Warning System (FZ EWS)
+# Monitors critical hosts and sends a Telegram alert when one goes down
+
+# === INSERT YOUR TELEGRAM CREDENTIALS BELOW ===
+BOT_TOKEN="8031184325:AAEGj3gzwYF8HaLjWHVe0gOG5bzo63tcRbU"
+CHAT_ID="1559582356"
+
+# === INSERT YOUR CRITICAL HOSTNAMES BELOW ===
+CRITICAL_HOSTS=(
+ "da.genesishostingtechnologies.com"
+ "zcluster.technodrome1.sshjunkie.com"
+ "zcluster.technodrome2.sshjunkie.com"
+ "krang.core.sshjunkie.com"
+ "tt.themediahub.org"
+ "toot.themediahub.org"
+ "chatwithus.live"
+ "genesishostingtechnologies.com"
+ "portal.genesishostingtechnologies.com"
+ "brandoncharles.us"
+ # Add more hostnames here, one per line inside quotes
+)
+
+LOG_FILE="/home/doc/fz_ews.log"
+COOLDOWN_FILE="/home/doc/fz_ews_cooldown"
+
+# Cooldown period in seconds to prevent alert spam (e.g., 3600 = 1 hour)
+ALERT_COOLDOWN=3600
+
+send_telegram_alert() {
+ local message="$1"
+ curl -s -X POST "https://api.telegram.org/bot$BOT_TOKEN/sendMessage" \
+ -d chat_id="$CHAT_ID" \
+ -d text="🚨 FailZero EWS Alert: $message" > /dev/null
+}
+
+log() {
+ echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" >> "$LOG_FILE"
+}
+
+check_cooldown() {
+ local host="$1"
+ local now=$(date +%s)
+ local last_alert=$(grep "^$host " "$COOLDOWN_FILE" 2>/dev/null | awk '{print $2}')
+ if [[ -z "$last_alert" ]]; then
+ return 0
+ fi
+ local elapsed=$((now - last_alert))
+ if (( elapsed > ALERT_COOLDOWN )); then
+ return 0
+ else
+ return 1
+ fi
+}
+
+update_cooldown() {
+ local host="$1"
+ local now=$(date +%s)
+ # Remove existing entry for host if any
+ grep -v "^$host " "$COOLDOWN_FILE" 2>/dev/null > "${COOLDOWN_FILE}.tmp"
+ mv "${COOLDOWN_FILE}.tmp" "$COOLDOWN_FILE"
+ # Append new timestamp
+ echo "$host $now" >> "$COOLDOWN_FILE"
+}
+
+check_host() {
+ local host="$1"
+ if ping -c 2 -W 3 "$host" > /dev/null 2>&1; then
+ log "$host is UP"
+ else
+ log "$host is DOWN"
+ if check_cooldown "$host"; then
+ send_telegram_alert "$host is DOWN or unreachable!"
+ update_cooldown "$host"
+ else
+ log "Cooldown active for $host; alert suppressed"
+ fi
+ fi
+}
+
+main() {
+ for host in "${CRITICAL_HOSTS[@]}"; do
+ check_host "$host"
+ done
+}
+
+main
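A watcher like this only earns its keep on a schedule; the cooldown file is what keeps Telegram quiet between repeat failures. A hypothetical crontab entry (the path and five-minute cadence are assumptions, not part of the script):

# Hypothetical: poll every 5 minutes; adjust the path to wherever the script lives.
*/5 * * * * /home/doc/genesis-tools/arecordalertfailzero.sh >/dev/null 2>&1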
diff --git a/backup.sh b/backup.sh
new file mode 100755
index 0000000..03d70e6
--- /dev/null
+++ b/backup.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+#blargh
+# Configuration
+SRC_DIR="/home/mastodon/live"
+DEST_DIR="/home/mastodon/backup"
+PG_DB_NAME="mastodon_production"
+PG_USER="mastodon"
+PG_HOST="" # Leave empty for local socket connection
+PG_PORT="5432"
+TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
+BACKUP_DIR="${DEST_DIR}/mastodon_backup_${TIMESTAMP}"
+LOG_FILE="${DEST_DIR}/backup_${TIMESTAMP}.log"
+
+# Ensure the destination directory exists
+mkdir -p "$BACKUP_DIR" || { echo "Failed to create backup directory"; exit 1; }
+
+# Backup Mastodon files
+echo "Starting rsync backup of Mastodon files..."
+rsync -av --delete "$SRC_DIR" "$BACKUP_DIR/mastodon_files" >> "$LOG_FILE" 2>&1 || { echo "rsync failed"; exit 1; }
+
+# Backup Nginx configuration files
+echo "Starting backup of Nginx configuration files..."
+rsync -av /etc/nginx "$BACKUP_DIR/nginx_configs" >> "$LOG_FILE" 2>&1 || { echo "rsync failed to backup Nginx configs"; exit 1; }
+
+# Backup PostgreSQL database
+echo "Starting PostgreSQL database backup..."
+pg_dump -U "$PG_USER" -d "$PG_DB_NAME" > "$BACKUP_DIR/mastodon_db_${TIMESTAMP}.sql" 2>> "$LOG_FILE" || { echo "pg_dump failed"; exit 1; }
+
+# Compress the backup
+echo "Compressing backup..."
+tar -czf "${BACKUP_DIR}.tar.gz" -C "$DEST_DIR" "mastodon_backup_${TIMESTAMP}" >> "$LOG_FILE" 2>&1 || { echo "Compression failed"; exit 1; }
+
+# Remove the uncompressed backup directory
+echo "Removing uncompressed backup directory..."
+ls -l "$BACKUP_DIR" >> "$LOG_FILE" 2>&1 # Debugging output
+rm -rf "$BACKUP_DIR" >> "$LOG_FILE" 2>&1 || { echo "Failed to remove uncompressed backup directory"; exit 1; }
+
+# Transfer backup to remote server
+REMOTE_USER="root"
+REMOTE_HOST="209.209.9.128"
+REMOTE_DIR="/mnt/e"
+
+echo "Transferring backup to remote server..." >> "$LOG_FILE" 2>&1
+rsync -av "${DEST_DIR}/mastodon_backup_${TIMESTAMP}.tar.gz" "${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_DIR}" >> "$LOG_FILE" 2>&1 || { echo "Remote rsync failed"; exit 1; }
+
+# Remove local compressed backup file
+echo "Removing local compressed backup file..." >> "$LOG_FILE" 2>&1
+rm "${DEST_DIR}/mastodon_backup_${TIMESTAMP}.tar.gz" >> "$LOG_FILE" 2>&1 || { echo "Failed to remove local backup file"; exit 1; }
+
+# Move log files to /home/mastodon/logs
+LOG_DEST_DIR="/home/mastodon/logs"
+mkdir -p "$LOG_DEST_DIR" >> "$LOG_FILE" 2>&1 || { echo "Failed to create log destination directory"; exit 1; }
+mv "$LOG_FILE" "${LOG_DEST_DIR}/backup_${TIMESTAMP}.log" >> "$LOG_FILE" 2>&1 || { echo "Failed to move log file"; exit 1; }
+
+# Clean up backup directory
+echo "Cleaning up backup directory..." >> "$LOG_FILE" 2>&1
+rm -rf "${DEST_DIR}"/* >> "$LOG_FILE" 2>&1 || { echo "Failed to clean up backup directory"; exit 1; }
+
+echo "Backup completed: ${BACKUP_DIR}.tar.gz"
diff --git a/brains_backup.sh b/brains_backup.sh
new file mode 100755
index 0000000..fe25dd5
--- /dev/null
+++ b/brains_backup.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+set -euo pipefail
+
+# === SSH Hosts ===
+SHREDDER_HOST="doc@shredder.sshjunkie.com"
+PORTAL_HOST="root@portal.genesishostingtechnologies.com"
+DA_HOST="root@da.genesishostingtechnologies.com"
+VAULT_HOST="root@thevault.bounceme.net"
+
+# === Telegram Setup ===
+TG_TOKEN="7277705363:AAGSw5Pmcbf7IsSyZKMqU6PJ4VsVwdKLRH0"
+TG_CHAT_ID="1559582356"
+
+send_telegram() {
+ local message="$1"
+ curl -s -X POST "https://api.telegram.org/bot$TG_TOKEN/sendMessage" \
+ -d "chat_id=$TG_CHAT_ID&text=$message"
+}
+
+# === Local Staging Area on Krang ===
+STAGING_DATASET="/deadbeef/staging"
+DATE=$(date +%Y%m%d%H%M%S)
+BACKUP_DIR="$STAGING_DATASET/brains-$DATE"
+mkdir -p "$BACKUP_DIR"
+
+send_telegram "🧠 Starting centralized backup from Krang to $BACKUP_DIR..."
+
+# === Sanity Checks on TheVault ===
+send_telegram "πŸ§ͺ Sanity check: ensuring TheVault datasets are mounted..."
+ssh -o BatchMode=yes $VAULT_HOST "zfs mount -a && ls /backups/azuracast /backups/krang /backups/directadmin" && \
+send_telegram "βœ… TheVault mountpoints verified!" || \
+send_telegram "❌ TheVault mountpoint sanity check FAILED!"
+
+# === Sanity Checks on Shredder ===
+send_telegram "πŸ§ͺ Sanity check: ensuring Shredder datasets are mounted..."
+ssh -o BatchMode=yes $SHREDDER_HOST "zfs mount -a && ls /assets/splmedia /assets/azuracast /assets/pokbackups /assets/splshows" && \
+send_telegram "βœ… Shredder mountpoints verified!" || \
+send_telegram "❌ Shredder mountpoint sanity check FAILED!"
+
+# === Helper Function for Steps ===
+run_backup_step() {
+ local description="$1"
+ shift
+ send_telegram "πŸ”„ $description"
+ if "$@"; then
+ send_telegram "βœ… $description complete!"
+ else
+ send_telegram "❌ $description FAILED!"
+ fi
+}
+
+# === 1️⃣ Sync SPL and Shredder Data ===
+run_backup_step "Syncing SPL and Shredder data" \
+ rsync -avz -e "ssh -o BatchMode=yes" $SHREDDER_HOST:/mnt/spl/ "$BACKUP_DIR/splmedia/" && \
+ rsync -avz -e "ssh -o BatchMode=yes" $SHREDDER_HOST:/mnt/spl/ "$BACKUP_DIR/splassets/"
+
+# === 2️⃣ Backup Krang's Configs ===
+run_backup_step "Backing up Krang host configs" \
+ rsync -avz /etc/pve "$BACKUP_DIR/krang-pve/" && \
+ rsync -avz /etc/network/interfaces "$BACKUP_DIR/krang-net/" && \
+ rsync -avz /etc/ssh "$BACKUP_DIR/krang-ssh/"
+
+# === 3️⃣ Backup AzuraCast DB from Portal ===
+run_backup_step "Backing up AzuraCast DB from Portal" \
+ ssh -o BatchMode=yes $PORTAL_HOST "pg_dump -U postgres azuracast" > "$BACKUP_DIR/databases/azuracast.sql"
+
+# === 4️⃣ Backup AzuraCast Configs ===
+run_backup_step "Backing up AzuraCast configs from Portal" \
+ rsync -avz -e "ssh -o BatchMode=yes" $PORTAL_HOST:/var/azuracast/.env "$BACKUP_DIR/azuracast/" && \
+ rsync -avz -e "ssh -o BatchMode=yes" $PORTAL_HOST:/var/azuracast/docker-compose.yml "$BACKUP_DIR/azuracast/" && \
+ rsync -avz -e "ssh -o BatchMode=yes" $PORTAL_HOST:/var/azuracast/stations "$BACKUP_DIR/azuracast/stations"
+
+# === 5️⃣ Sync AzuraCast Media from Portal to Shredder, then to Krang ===
+run_backup_step "Syncing AzuraCast media from Portal to Shredder" \
+ ssh -o BatchMode=yes $PORTAL_HOST "rsync -avz /mnt/azuracast1/ doc@shredder.sshjunkie.com:/assets/azuracast/"
+
+run_backup_step "Backing up AzuraCast media from Shredder to Krang" \
+ rsync -avz -e "ssh -o BatchMode=yes" $SHREDDER_HOST:/assets/azuracast/ "$BACKUP_DIR/azuracast-media/"
+
+# === 6️⃣ Backup DirectAdmin Configs ===
+run_backup_step "Backing up DirectAdmin from Portal" \
+ rsync -avz -e "ssh -o BatchMode=yes" $DA_HOST:/usr/local/directadmin/data/admin/ "$BACKUP_DIR/directadmin/"
+
+# === 7️⃣ Push to TheVault ===
+send_telegram "πŸ”„ Pushing backups to TheVault datasets..."
+
+# AzuraCast Configs & DB
+rsync -avz -e "ssh -o BatchMode=yes" "$BACKUP_DIR/azuracast/" $VAULT_HOST:/backups/azuracast/configs/$DATE/
+rsync -avz -e "ssh -o BatchMode=yes" "$BACKUP_DIR/databases/" $VAULT_HOST:/backups/azuracast/configs/$DATE/
+
+# AzuraCast Media
+rsync -avz -e "ssh -o BatchMode=yes" "$BACKUP_DIR/azuracast-media/" $VAULT_HOST:/backups/azuracast/$DATE/
+
+# DirectAdmin
+rsync -avz -e "ssh -o BatchMode=yes" "$BACKUP_DIR/directadmin/" $VAULT_HOST:/backups/directadmin/$DATE/
+
+# Krang
+rsync -avz -e "ssh -o BatchMode=yes" "$BACKUP_DIR/krang-*/" $VAULT_HOST:/backups/krang/$DATE/
+
+# SPL
+rsync -avz -e "ssh -o BatchMode=yes" "$BACKUP_DIR/splmedia/" $VAULT_HOST:/backups/splmedia/$DATE/
+rsync -avz -e "ssh -o BatchMode=yes" "$BACKUP_DIR/splassets/" $VAULT_HOST:/backups/splassets/$DATE/
+
+send_telegram "πŸŽ‰ FULL SYSTEM BACKUP COMPLETED & MIRRORED TO VAULT!"
+exit 0
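One pitfall worth noting with a helper like run_backup_step: "$@" executes exactly one command, so a multi-command step has to be handed over as a single unit or split into separate steps, as done above. A minimal illustration with placeholder paths (the /srv and /backup directories are made up):

# "$@" runs one command; chain several inside bash -c to keep them in one reported step.
run_backup_step "Example multi-part step" \
    bash -c "rsync -a /srv/a/ /backup/a/ && rsync -a /srv/b/ /backup/b/"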
diff --git a/brains_backup2.sh b/brains_backup2.sh
new file mode 100755
index 0000000..d34e56e
--- /dev/null
+++ b/brains_backup2.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+set -euo pipefail
+
+# === SSH Hosts ===
+SHREDDER_HOST="doc@shredder.sshjunkie.com"
+PORTAL_HOST="root@portal.genesishostingtechnologies.com"
+DA_HOST="root@da.genesishostingtechnologies.com"
+VAULT_HOST="root@thevault.bounceme.net"
+DATE=$(date +%Y%m%d%H%M%S)
+
+# === Telegram Setup ===
+TG_TOKEN="7277705363:AAGSw5Pmcbf7IsSyZKMqU6PJ4VsVwdKLRH0"
+TG_CHAT_ID="1559582356"
+
+send_telegram() {
+ local message="$1"
+ curl -s -X POST "https://api.telegram.org/bot$TG_TOKEN/sendMessage" \
+ -d "chat_id=$TG_CHAT_ID&text=$message"
+}
+
+send_telegram "🧠 Starting V2 Direct-to-Vault Backup Orchestration..."
+
+# === Sanity Checks on TheVault and Shredder Mounts ===
+send_telegram "πŸ§ͺ Sanity check: vault & shredder mounts..."
+#ssh -o BatchMode=yes $VAULT_HOST "zfs mount -a && ls /backups/azuracast /backups/krang /backups/directadmin" || send_telegram "❌ Vault sanity check FAILED!"
+#ssh -o BatchMode=yes $SHREDDER_HOST "
+# zfs mount assets/splmedia &&
+# zfs mount assets/azuracast &&
+# zfs mount assets/splshows &&
+# ls /assets/splmedia /assets/azuracast /assets/splshows
+#"
+
+# === 1️⃣ Direct SPL data: Shredder β†’ TheVault ===
+send_telegram "πŸ”„ Syncing SPL data directly from Shredder to TheVault..."
+ssh -o BatchMode=yes $SHREDDER_HOST "rsync -avz /mnt/spl/splmedia/ $VAULT_HOST:/backups/splmedia/$DATE/"
+ssh -o BatchMode=yes $SHREDDER_HOST "rsync -avz /mnt/spl/splassets/ $VAULT_HOST:/backups/splassets/$DATE/"
+ssh -o BatchMode=yes $SHREDDER_HOST "rsync -avz /mnt/spl/splshows/ $VAULT_HOST:/backups/splshows/$DATE/"
+send_telegram "βœ… SPL data sync complete!"
+
+# === 2️⃣ Direct AzuraCast media: Shredder β†’ TheVault ===
+send_telegram "πŸ”„ Syncing AzuraCast media directly from Shredder to TheVault..."
+ssh -o BatchMode=yes $SHREDDER_HOST "rsync -avz /mnt/shredder.sshjunkie.com/azuracast/ $VAULT_HOST:/backups/azuracast/$DATE/"
+send_telegram "βœ… AzuraCast media sync complete!"
+
+# === 3️⃣ Direct AzuraCast configs: Portal β†’ TheVault ===
+send_telegram "πŸ”„ Syncing AzuraCast configs from Portal to TheVault..."
+ssh -o BatchMode=yes $PORTAL_HOST "rsync -avz /var/azuracast/.env /var/azuracast/docker-compose.yml /var/azuracast/stations $VAULT_HOST:/backups/azuracast/configs/$DATE/"
+send_telegram "βœ… AzuraCast configs sync complete!"
+
+# === 4️⃣ Direct AzuraCast DB dump: Portal β†’ TheVault ===
+send_telegram "πŸ”„ Dumping and pushing AzuraCast DB from Portal to TheVault..."
+ssh -o BatchMode=yes $PORTAL_HOST "pg_dump -U postgres azuracast | ssh $VAULT_HOST 'cat > /backups/azuracast/configs/$DATE/azuracast.sql'"
+send_telegram "βœ… AzuraCast DB push complete!"
+
+# === 5️⃣ Direct DirectAdmin backup: Portal β†’ TheVault ===
+send_telegram "πŸ”„ Syncing DirectAdmin configs from Portal to TheVault..."
+ssh -o BatchMode=yes $PORTAL_HOST "rsync -avz /usr/local/directadmin/data/admin/ $VAULT_HOST:/backups/directadmin/$DATE/"
+send_telegram "βœ… DirectAdmin sync complete!"
+
+# === 6️⃣ Krang's Proxmox configs: Krang β†’ TheVault ===
+send_telegram "πŸ”„ Syncing Krang configs to TheVault..."
+rsync -avz /etc/pve $VAULT_HOST:/backups/krang/$DATE/pve/
+rsync -avz /etc/network/interfaces $VAULT_HOST:/backups/krang/$DATE/network/
+rsync -avz /etc/ssh $VAULT_HOST:/backups/krang/$DATE/ssh/
+send_telegram "βœ… Krang configs push complete!"
+
+# === πŸŽ‰ All done! ===
+send_telegram "πŸŽ‰ V2 Direct-to-Vault Backup COMPLETED!"
+exit 0
diff --git a/check_services.sh b/check_services.sh
new file mode 100755
index 0000000..3d32937
--- /dev/null
+++ b/check_services.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# check_services.sh – outputs JSON for frontend status page
+
+check_ping() {
+ ping -c1 -W1 "$1" >/dev/null 2>&1 && echo "online" || echo "offline"
+}
+
+check_tcp() {
+ nc -z -w 2 "$1" "$2" >/dev/null 2>&1 && echo "online" || echo "offline"
+}
+
+TEAMTALK_STATUS=$(check_tcp tt.themediahub.org 10442)
+DA_STATUS=$(check_tcp da.genesishostingtechnologies.com 2222)
+SHREDDER_STATUS=$(check_ping shredder.sshjunkie.com)
+
+cat <<EOF
+{
+ "teamtalk": "$TEAMTALK_STATUS",
+ "directadmin": "$DA_STATUS",
+ "shredder": "$SHREDDER_STATUS"
+}
+EOF
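The JSON is meant for a status page, so the usual wiring is a cron job writing the output somewhere the web server can reach, with jq as a quick sanity check. The output path and cadence below are assumptions:

# Hypothetical wiring: regenerate the status JSON every minute.
* * * * * /usr/local/bin/check_services.sh > /var/www/html/status.json
# Spot-check a single field:
/usr/local/bin/check_services.sh | jq -r .teamtalk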
diff --git a/clean_media.sh b/clean_media.sh
new file mode 100755
index 0000000..fc914ee
--- /dev/null
+++ b/clean_media.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+cd /home/mastodon/live
+PATH=/home/mastodon/bin:/home/mastodon/.local/bin:/home/mastodon/.rbenv/plugins/ruby-build/bin:/home/mastodon/.rbenv/shims:/home/mastodon/.rbenv/bin:/usr/bin:/bin
+RAILS_ENV=production bin/tootctl media remove --days=1 > log/media_remove.log 2>&1
diff --git a/clean_orphans.sh b/clean_orphans.sh
new file mode 100755
index 0000000..8577775
--- /dev/null
+++ b/clean_orphans.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+cd /home/mastodon/live
+PATH=/home/mastodon/bin:/home/mastodon/.local/bin:/home/mastodon/.rbenv/plugins/ruby-build/bin:/home/mastodon/.rbenv/shims:/home/mastodon/.rbenv/bin:/usr/bin:/bin
diff --git a/clean_previewcards.sh b/clean_previewcards.sh
new file mode 100755
index 0000000..60c9965
--- /dev/null
+++ b/clean_previewcards.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+cd /home/mastodon/live
+PATH=/home/mastodon/bin:/home/mastodon/.local/bin:/home/mastodon/.rbenv/plugins/ruby-build/bin:/home/mastodon/.rbenv/shims:/home/mastodon/.rbenv/bin:/usr/bin:/bin
+RAILS_ENV=production bin/tootctl preview-cards remove --days=14 > log/preview-cards_remove.log 2>&1
diff --git a/copydunkadunk.sh b/copydunkadunk.sh
new file mode 100755
index 0000000..9d4095c
--- /dev/null
+++ b/copydunkadunk.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Base path where your current datasets are mounted
+BASE_PATH="/assets"
+
+# Mapping of underscore-named folders to dash-named equivalents
+declare -A BUCKETS=(
+ ["assets_azuracast"]="assets-azuracast"
+ ["assets_archives"]="assets-archives"
+ ["assets_genesisassets"]="assets-genesisassets"
+ ["assets_genesislibrary"]="assets-genesislibrary"
+ ["assets_teamtalkdata"]="assets-teamtalkdata"
+)
+
+echo "=== Copying underscore-named folders to dash-named MinIO bucket folders ==="
+for SRC in "${!BUCKETS[@]}"; do
+ DEST="${BUCKETS[$SRC]}"
+ echo "πŸ“¦ Copying $SRC to $DEST ..."
+ rsync -a --info=progress2 "$BASE_PATH/$SRC/" "$BASE_PATH/$DEST/"
+ chown -R minio-user:minio-user "$BASE_PATH/$DEST"
+done
+
+echo ""
+echo "βœ… Done. You can now point MinIO at these dash-named paths:"
+for DEST in "${BUCKETS[@]}"; do
+ echo " /assets/$DEST"
+done
+
+echo "πŸ”„ Then restart MinIO:"
+echo " systemctl daemon-reload && systemctl restart minio"
diff --git a/dasystemisdownyo.sh b/dasystemisdownyo.sh
new file mode 100755
index 0000000..34f50ce
--- /dev/null
+++ b/dasystemisdownyo.sh
@@ -0,0 +1,128 @@
+#!/bin/bash
+#da system is down yo
+# === CONFIG ===
+REMOTE_USER="doc"
+BOT_TOKEN="7277705363:AAGSw5Pmcbf7IsSyZKMqU6PJ4VsVwdKLRH0"
+CHAT_ID="1559582356"
+TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
+LOGFILE="$HOME/krang-logs/health-$(date '+%Y%m%d-%H%M').log"
+
+SWAP_LIMIT_MB=512
+LOAD_LIMIT=4.0
+mkdir -p "$HOME/krang-logs"
+
+SERVERS=(
+ zcluster.technodrome1.sshjunkie.com
+ zcluster.technodrome2.sshjunkie.com
+ shredder.sshjunkie.com
+ chatwithus.live
+ portal.genesishostingtechnologies.com
+)
+
+# Keys are full hostnames so they match the $HOST lookup below
+# (short keys like [zcluster.technodrome1] would never match, and
+# portal had a doubled ".com.com").
+declare -A HOST_ROLES=(
+    [zcluster.technodrome1.sshjunkie.com]="postgres"
+    [zcluster.technodrome2.sshjunkie.com]="postgres"
+    [shredder.sshjunkie.com]="minio docker"
+    [chatwithus.live]="mastodon docker nginx"
+    [portal.genesishostingtechnologies.com]="azuracast docker nginx"
+)
+
+SUMMARY="πŸ“‘ Krang System Health Report - $TIMESTAMP
+
+"
+
+for HOST in "${SERVERS[@]}"; do
+ SHORT_HOST=$(echo "$HOST" | cut -d'.' -f1)
+ echo "πŸ” Collecting from $HOST..."
+
+ DATA=$(ssh "$REMOTE_USER@$HOST" bash -s << 'EOF'
+set -e
+HOST=$(hostname)
+MEM=$(awk '/MemAvailable/ {printf "%.1f Gi free", $2 / 1024 / 1024}' /proc/meminfo)
+SWAP_RAW=$(free -m | awk '/Swap:/ {print $3}')
+SWAP="$SWAP_RAW Mi used"
+DISK=$(df -h / | awk 'NR==2 {print $4 " free"}')
+LOAD=$(uptime | awk -F'load average:' '{print $2}' | cut -d, -f1 | xargs)
+UPTIME=$(uptime -p)
+
+# Graceful service status checks
+check_status() {
+ systemctl is-active "$1" 2>/dev/null || echo "inactive"
+}
+NGINX=$(check_status nginx)
+DOCKER=$(check_status docker)
+PGSQL=$(check_status postgresql)
+
+echo "$HOST|$MEM|$SWAP_RAW|$SWAP|$DISK|$LOAD|$UPTIME|$NGINX|$DOCKER|$PGSQL"
+EOF
+) || {
+ SUMMARY+="πŸ–₯️ $HOST
+❌ Failed to connect or run checks.
+"
+ continue
+}
+
+ IFS='|' read -r H MEM SWAP_MB SWAP_HUMAN DISK LOAD1 UPTIME_STATUS NGINX_STATUS DOCKER_STATUS PGSQL_STATUS <<< "$DATA"
+    ROLES="${HOST_ROLES[$HOST]}"
+ ALERTS=""
+
+ if [[ -n "$SWAP_MB" && "$SWAP_MB" =~ ^[0-9]+$ && "$SWAP_MB" -gt "$SWAP_LIMIT_MB" ]]; then
+ ALERTS+="⚠️ HIGH SWAP ($SWAP_HUMAN)
+"
+ fi
+
+ if [[ -n "$LOAD1" ]]; then
+ LOAD_HIGH=$(awk "BEGIN {print ($LOAD1 > $LOAD_LIMIT) ? 1 : 0}")
+ [ "$LOAD_HIGH" -eq 1 ] && ALERTS+="⚠️ HIGH LOAD ($LOAD1)
+"
+ fi
+
+ [[ "$ROLES" == *"nginx"* && "$NGINX_STATUS" != "active" ]] && ALERTS+="❌ NGINX not running
+"
+ [[ "$ROLES" == *"docker"* && "$DOCKER_STATUS" != "active" ]] && ALERTS+="❌ Docker not running
+"
+ [[ "$ROLES" == *"postgres"* && "$PGSQL_STATUS" != "active" ]] && ALERTS+="❌ PostgreSQL not running
+"
+
+ ALERTS_MSG=""
+ [ -n "$ALERTS" ] && ALERTS_MSG="🚨 ALERTS:
+$ALERTS"
+
+ SUMMARY+="πŸ–₯️ $H
+β€’ Mem: $MEM
+β€’ Swap: $SWAP_HUMAN
+β€’ Disk: $DISK
+β€’ Load: ${LOAD1:-Unavailable}
+β€’ Uptime: $UPTIME_STATUS
+β€’ Roles: ${ROLES:-none}
+$ALERTS_MSG
+"
+done
+
+# === KRANG CLOCK ACCURACY CHECK ===
+NTP_RESULT=$(ntpdate -q time.google.com 2>&1)
+OFFSET=$(echo "$NTP_RESULT" | awk '/offset/ {print $10}')
+if [[ "$OFFSET" =~ ^-?[0-9.]+$ ]]; then
+    OFFSET_MS=$(awk "BEGIN {printf \"%.0f\", $OFFSET * 1000}")
+ if (( OFFSET_MS > 500 || OFFSET_MS < -500 )); then
+ CORRECTION=$(ntpdate -u time.google.com 2>&1)
+ SUMMARY+="πŸ› οΈ Auto-corrected Krang clock via ntpdate: $CORRECTION
+"
+ SUMMARY+="πŸ•°οΈ Krang Clock Offset: ${OFFSET_MS}ms β€” ⚠️ OUT OF SYNC
+"
+ else
+ SUMMARY+="πŸ•°οΈ Krang Clock Offset: ${OFFSET_MS}ms β€” βœ… SYNCHRONIZED
+"
+ fi
+else
+ SUMMARY+="πŸ•°οΈ Krang Clock Check: ❌ FAILED to retrieve offset.
+"
+fi
+
+# Log to file
+echo -e "$SUMMARY" > "$LOGFILE"
+
+# Send to Telegram
+curl -s -X POST https://api.telegram.org/bot$BOT_TOKEN/sendMessage \
+ -d chat_id="$CHAT_ID" \
+ -d text="$SUMMARY"
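The remote collector and the IFS='|' read above form a tiny wire protocol: one pipe-delimited record per host. A self-contained sketch of the same parse, with a made-up record:

# Illustrative only: parse one collector record the same way the loop does.
line='examplehost|3.2 Gi free|120|120 Mi used|41G free|0.53|up 3 days|active|active|active'
IFS='|' read -r h mem swap_mb swap_h disk load up nginx docker pgsql <<< "$line"
echo "$h: load=$load nginx=$nginx docker=$docker postgres=$pgsql"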
diff --git a/dasystemisdownyo2.sh b/dasystemisdownyo2.sh
new file mode 100755
index 0000000..6f65f3b
--- /dev/null
+++ b/dasystemisdownyo2.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+# da system is down yo – Krang Healthcheck
+# Monitors system health across all Genesis nodes
+
+# === CONFIG ===
+REMOTE_USER="doc"
+BOT_TOKEN="7277705363:AAGSw5Pmcbf7IsSyZKMqU6PJ4VsVwdKLRH0"
+CHAT_ID="1559582356"
+TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
+LOGFILE="$HOME/krang-logs/health-$(date '+%Y%m%d-%H%M').log"
+
+SWAP_LIMIT_MB=512
+LOAD_LIMIT=4.0
+mkdir -p "$HOME/krang-logs"
+
+# === Host list ===
+SERVERS=(
+ zcluster.technodrome1.sshjunkie.com
+ zcluster.technodrome2.sshjunkie.com
+ shredder.sshjunkie.com
+ chatwithus.live
+ portal.genesishostingtechnologies.com
+)
+
+# === Roles per host ===
+# Full-hostname keys so the $HOST lookup below matches every entry.
+declare -A HOST_ROLES=(
+    [zcluster.technodrome1.sshjunkie.com]="postgres"
+    [zcluster.technodrome2.sshjunkie.com]="postgres"
+    [shredder.sshjunkie.com]="minio"
+    [chatwithus.live]="mastodon docker nginx"
+    [portal.genesishostingtechnologies.com]="azuracast docker nginx"
+)
+
+SUMMARY="πŸ“‘ Krang System Health Report - $TIMESTAMP
+
+"
+
+for HOST in "${SERVERS[@]}"; do
+ SHORT_HOST=$(echo "$HOST" | cut -d'.' -f1)
+ echo "πŸ” Collecting from $HOST..."
+
+ DATA=$(ssh "$REMOTE_USER@$HOST" bash -s << 'EOF'
+set -e
+HOST=$(hostname)
+MEM=$(awk '/MemAvailable/ {printf "%.1f Gi free", $2 / 1024 / 1024}' /proc/meminfo)
+SWAP_RAW=$(free -m | awk '/Swap:/ {print $3}')
+SWAP="$SWAP_RAW Mi used"
+DISK=$(df -h / | awk 'NR==2 {print $4 " free"}')
+LOAD=$(uptime | awk -F'load average:' '{print $2}' | cut -d, -f1 | xargs)
+UPTIME=$(uptime -p)
+
+# Graceful service status checks
+check_status() {
+ systemctl is-active "$1" 2>/dev/null || echo "inactive"
+}
+NGINX=$(check_status nginx)
+DOCKER=$(check_status docker)
+PGSQL=$(check_status postgresql)
+
+echo "$HOST|$MEM|$SWAP_RAW|$SWAP|$DISK|$LOAD|$UPTIME|$NGINX|$DOCKER|$PGSQL"
+EOF
+) || {
+ SUMMARY+="πŸ–₯️ $HOST
+❌ Failed to connect or run checks.
+"
+ continue
+}
+
+ IFS='|' read -r H MEM SWAP_MB SWAP_HUMAN DISK LOAD1 UPTIME_STATUS NGINX_STATUS DOCKER_STATUS PGSQL_STATUS <<< "$DATA"
+    ROLES="${HOST_ROLES[$HOST]}"
+ ALERTS=""
+
+ # === Smart Swap Alert: only if memory is low OR system is under load ===
+ if [[ -n "$SWAP_MB" && "$SWAP_MB" =~ ^[0-9]+$ && "$SWAP_MB" -gt "$SWAP_LIMIT_MB" ]]; then
+ MEM_MB=$(echo "$MEM" | awk '{printf "%d", $1 * 1024}' 2>/dev/null)
+ LOAD_HIGH=$(awk "BEGIN {print ($LOAD1 > $LOAD_LIMIT) ? 1 : 0}")
+ if [[ "$MEM_MB" -lt 1024 || "$LOAD_HIGH" -eq 1 ]]; then
+ ALERTS+="⚠️ HIGH SWAP ($SWAP_HUMAN)\n"
+ fi
+ fi
+
+ # === Load Alert ===
+ if [[ -n "$LOAD1" ]]; then
+ LOAD_HIGH=$(awk "BEGIN {print ($LOAD1 > $LOAD_LIMIT) ? 1 : 0}")
+ [ "$LOAD_HIGH" -eq 1 ] && ALERTS+="⚠️ HIGH LOAD ($LOAD1)\n"
+ fi
+
+ # === Service Status Checks ===
+ [[ "$ROLES" == *"nginx"* && "$NGINX_STATUS" != "active" ]] && ALERTS+="❌ NGINX not running\n"
+ if [[ "$ROLES" == *"docker"* && "$SHORT_HOST" != "shredder" && "$DOCKER_STATUS" != "active" ]]; then
+ ALERTS+="❌ Docker not running\n"
+ fi
+ [[ "$ROLES" == *"postgres"* && "$PGSQL_STATUS" != "active" ]] && ALERTS+="❌ PostgreSQL not running\n"
+
+ ALERTS_MSG=""
+ [ -n "$ALERTS" ] && ALERTS_MSG="🚨 ALERTS:
+$ALERTS"
+
+ SUMMARY+="πŸ–₯️ $H
+β€’ Mem: $MEM
+β€’ Swap: $SWAP_HUMAN
+β€’ Disk: $DISK
+β€’ Load: ${LOAD1:-Unavailable}
+β€’ Uptime: $UPTIME_STATUS
+β€’ Roles: ${ROLES:-none}
+$ALERTS_MSG
+"
+done
+
+# === Krang Clock Sync Check ===
+NTP_RESULT=$(ntpdate -q time.google.com 2>&1)
+OFFSET=$(echo "$NTP_RESULT" | awk '/offset/ {print $10}')
+if [[ "$OFFSET" =~ ^-?[0-9.]+$ ]]; then
+ OFFSET_MS=$(awk "BEGIN {printf \"%.0f\", $OFFSET * 1000}")
+ if (( OFFSET_MS > 500 || OFFSET_MS < -500 )); then
+ CORRECTION=$(ntpdate -u time.google.com 2>&1)
+ SUMMARY+="πŸ› οΈ Auto-corrected Krang clock via ntpdate: $CORRECTION
+"
+ SUMMARY+="πŸ•°οΈ Krang Clock Offset: ${OFFSET_MS}ms β€” ⚠️ OUT OF SYNC
+"
+ else
+ SUMMARY+="πŸ•°οΈ Krang Clock Offset: ${OFFSET_MS}ms β€” βœ… SYNCHRONIZED
+"
+ fi
+else
+ SUMMARY+="πŸ•°οΈ Krang Clock Check: ❌ FAILED to retrieve offset.
+"
+fi
+
+# === Log & Send ===
+echo -e "$SUMMARY" > "$LOGFILE"
+
+curl -s -X POST https://api.telegram.org/bot$BOT_TOKEN/sendMessage \
+ -d chat_id="$CHAT_ID" \
+ -d text="$SUMMARY"
diff --git a/dasystemisdownyo3.sh b/dasystemisdownyo3.sh
new file mode 100755
index 0000000..e884eec
--- /dev/null
+++ b/dasystemisdownyo3.sh
@@ -0,0 +1,149 @@
+#!/bin/bash
+# da system is down yo – Krang Healthcheck
+# Monitors system health across all Genesis nodes
+
+# === CONFIG ===
+REMOTE_USER="doc"
+BOT_TOKEN="7277705363:AAGSw5Pmcbf7IsSyZKMqU6PJ4VsVwdKLRH0"
+CHAT_ID="1559582356"
+TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
+LOGFILE="$HOME/krang-logs/health-$(date '+%Y%m%d-%H%M').log"
+
+SWAP_LIMIT_MB=512
+LOAD_LIMIT=4.0
+mkdir -p "$HOME/krang-logs"
+
+# === Host list ===
+SERVERS=(
+ zcluster.technodrome1.sshjunkie.com
+ zcluster.technodrome2.sshjunkie.com
+ shredder.sshjunkie.com
+ chatwithus.live
+ portal.genesishostingtechnologies.com
+)
+
+# === Roles per host ===
+# Full-hostname keys so the $HOST lookup below matches every entry.
+declare -A HOST_ROLES=(
+    [zcluster.technodrome1.sshjunkie.com]="postgres"
+    [zcluster.technodrome2.sshjunkie.com]="postgres"
+    [shredder.sshjunkie.com]="minio"
+    [chatwithus.live]="mastodon nginx"
+    [portal.genesishostingtechnologies.com]="azuracast docker nginx"
+)
+
+SUMMARY="πŸ“‘ Krang System Health Report - $TIMESTAMP
+
+"
+
+for HOST in "${SERVERS[@]}"; do
+ SHORT_HOST=$(echo "$HOST" | cut -d'.' -f1)
+ echo "πŸ” Collecting from $HOST..."
+
+ DATA=$(ssh "$REMOTE_USER@$HOST" bash -s << 'EOF'
+set -e
+HOST=$(hostname)
+MEM=$(awk '/MemAvailable/ {printf "%.1f Gi free", $2 / 1024 / 1024}' /proc/meminfo)
+SWAP_RAW=$(free -m | awk '/Swap:/ {print $3}')
+SWAP="$SWAP_RAW Mi used"
+DISK=$(df -h / | awk 'NR==2 {print $4 " free"}')
+LOAD=$(uptime | awk -F'load average:' '{print $2}' | cut -d, -f1 | xargs)
+UPTIME=$(uptime -p)
+
+# Functional service checks
+if curl -s --head http://localhost | grep -q "200 OK"; then
+ NGINX="active"
+else
+ NGINX="inactive"
+fi
+
+if [ -S /var/run/docker.sock ] && docker info >/dev/null 2>&1; then
+ DOCKER="active"
+else
+ DOCKER="inactive"
+fi
+
+if command -v pg_isready >/dev/null 2>&1 && pg_isready -q >/dev/null 2>&1; then
+ PGSQL="active"
+else
+ PGSQL="inactive"
+fi
+
+# Optional Mastodon public instance check (only if applicable)
+MSTDN_OK=$(curl -s --max-time 5 -o /dev/null -w "%{http_code}" https://chatwithus.live/api/v1/instance)
+[ "$MSTDN_OK" = "200" ] && MASTODON="active" || MASTODON="inactive"
+
+echo "$HOST|$MEM|$SWAP_RAW|$SWAP|$DISK|$LOAD|$UPTIME|$NGINX|$DOCKER|$PGSQL|$MASTODON"
+EOF
+) || {
+ SUMMARY+="πŸ–₯️ $HOST
+❌ Failed to connect or run checks.
+"
+ continue
+}
+
+ IFS='|' read -r H MEM SWAP_MB SWAP_HUMAN DISK LOAD1 UPTIME_STATUS NGINX_STATUS DOCKER_STATUS PGSQL_STATUS MASTODON_STATUS <<< "$DATA"
+    ROLES="${HOST_ROLES[$HOST]}"
+ ALERTS=""
+
+ # === Smart Swap Alert: only if memory is low OR system is under load ===
+ if [[ -n "$SWAP_MB" && "$SWAP_MB" =~ ^[0-9]+$ && "$SWAP_MB" -gt "$SWAP_LIMIT_MB" ]]; then
+ MEM_MB=$(echo "$MEM" | awk '{printf "%d", $1 * 1024}' 2>/dev/null)
+ LOAD_HIGH=$(awk "BEGIN {print ($LOAD1 > $LOAD_LIMIT) ? 1 : 0}")
+ if [[ "$MEM_MB" -lt 1024 || "$LOAD_HIGH" -eq 1 ]]; then
+ ALERTS+="⚠️ HIGH SWAP ($SWAP_HUMAN)\n"
+ fi
+ fi
+
+ # === Load Alert ===
+ if [[ -n "$LOAD1" ]]; then
+ LOAD_HIGH=$(awk "BEGIN {print ($LOAD1 > $LOAD_LIMIT) ? 1 : 0}")
+ [ "$LOAD_HIGH" -eq 1 ] && ALERTS+="⚠️ HIGH LOAD ($LOAD1)\n"
+ fi
+
+ # === Functional Service Status Alerts ===
+ [[ "$ROLES" == *"nginx"* && "$NGINX_STATUS" != "active" ]] && ALERTS+="❌ NGINX not responding on localhost\n"
+ [[ "$ROLES" == *"docker"* && "$DOCKER_STATUS" != "active" && "$SHORT_HOST" != "shredder" ]] && ALERTS+="❌ Docker not functional\n"
+ [[ "$ROLES" == *"postgres"* && "$PGSQL_STATUS" != "active" ]] && ALERTS+="❌ PostgreSQL not ready\n"
+ [[ "$ROLES" == *"mastodon"* && "$MASTODON_STATUS" != "active" ]] && ALERTS+="❌ Mastodon API check failed\n"
+
+ ALERTS_MSG=""
+ [ -n "$ALERTS" ] && ALERTS_MSG="🚨 ALERTS:
+$ALERTS"
+
+ SUMMARY+="πŸ–₯️ $H
+β€’ Mem: $MEM
+β€’ Swap: $SWAP_HUMAN
+β€’ Disk: $DISK
+β€’ Load: ${LOAD1:-Unavailable}
+β€’ Uptime: $UPTIME_STATUS
+β€’ Roles: ${ROLES:-none}
+$ALERTS_MSG
+"
+done
+
+# === Krang Clock Sync Check ===
+NTP_RESULT=$(ntpdate -q time.google.com 2>&1)
+OFFSET=$(echo "$NTP_RESULT" | awk '/offset/ {print $10}')
+if [[ "$OFFSET" =~ ^-?[0-9.]+$ ]]; then
+ OFFSET_MS=$(awk "BEGIN {printf \"%.0f\", $OFFSET * 1000}")
+ if (( OFFSET_MS > 500 || OFFSET_MS < -500 )); then
+ CORRECTION=$(ntpdate -u time.google.com 2>&1)
+ SUMMARY+="πŸ› οΈ Auto-corrected Krang clock via ntpdate: $CORRECTION
+"
+ SUMMARY+="πŸ•°οΈ Krang Clock Offset: ${OFFSET_MS}ms β€” ⚠️ OUT OF SYNC
+"
+ else
+ SUMMARY+="πŸ•°οΈ Krang Clock Offset: ${OFFSET_MS}ms β€” βœ… SYNCHRONIZED
+"
+ fi
+else
+ SUMMARY+="πŸ•°οΈ Krang Clock Check: ❌ FAILED to retrieve offset.
+"
+fi
+
+# === Log & Send ===
+echo -e "$SUMMARY" > "$LOGFILE"
+
+curl -s -X POST https://api.telegram.org/bot$BOT_TOKEN/sendMessage \
+ -d chat_id="$CHAT_ID" \
+ -d text="$SUMMARY"
diff --git a/dasystemisdownyo4.sh b/dasystemisdownyo4.sh
new file mode 100755
index 0000000..5fbd342
--- /dev/null
+++ b/dasystemisdownyo4.sh
@@ -0,0 +1,134 @@
+#!/bin/bash
+# da system is down yo – Krang Healthcheck
+# Monitors system health across all Genesis nodes
+
+# === CONFIG ===
+REMOTE_USER="doc"
+BOT_TOKEN="7277705363:AAGSw5Pmcbf7IsSyZKMqU6PJ4VsVwdKLRH0"
+CHAT_ID="1559582356"
+TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
+LOGFILE="$HOME/krang-logs/health-$(date '+%Y%m%d-%H%M').log"
+
+SWAP_LIMIT_MB=512
+LOAD_LIMIT=4.0
+mkdir -p "$HOME/krang-logs"
+
+# === Host list ===
+SERVERS=(
+ zcluster.technodrome1.sshjunkie.com
+ zcluster.technodrome2.sshjunkie.com
+ shredder.sshjunkie.com
+ chatwithus.live
+ portal.genesishostingtechnologies.com
+)
+
+# === Roles per host ===
+# Full-hostname keys so the $HOST lookup below matches every entry.
+declare -A HOST_ROLES=(
+    [zcluster.technodrome1.sshjunkie.com]="postgres"
+    [zcluster.technodrome2.sshjunkie.com]="postgres"
+    [shredder.sshjunkie.com]="minio"
+    [chatwithus.live]="mastodon"
+    [portal.genesishostingtechnologies.com]="azuracast"
+)
+
+SUMMARY="πŸ“‘ Krang System Health Report - $TIMESTAMP
+
+"
+
+for HOST in "${SERVERS[@]}"; do
+ SHORT_HOST=$(echo "$HOST" | cut -d'.' -f1)
+ echo "πŸ” Collecting from $HOST..."
+
+ DATA=$(ssh "$REMOTE_USER@$HOST" bash -s << 'EOF'
+set -e
+HOST=$(hostname)
+MEM=$(awk '/MemAvailable/ {printf "%.1f Gi free", $2 / 1024 / 1024}' /proc/meminfo)
+SWAP_RAW=$(free -m | awk '/Swap:/ {print $3}')
+SWAP="$SWAP_RAW Mi used"
+DISK=$(df -h / | awk 'NR==2 {print $4 " free"}')
+LOAD=$(uptime | awk -F'load average:' '{print $2}' | cut -d, -f1 | xargs)
+UPTIME=$(uptime -p)
+
+# Functional service checks
+if command -v pg_isready >/dev/null 2>&1 && pg_isready -q >/dev/null 2>&1; then
+ PGSQL="active"
+else
+ PGSQL="inactive"
+fi
+
+MSTDN_OK=$(curl -s --max-time 5 -o /dev/null -w "%{http_code}" https://chatwithus.live/api/v1/instance)
+[ "$MSTDN_OK" = "200" ] && MASTODON="active" || MASTODON="inactive"
+
+echo "$HOST|$MEM|$SWAP_RAW|$SWAP|$DISK|$LOAD|$UPTIME|$PGSQL|$MASTODON"
+EOF
+) || {
+ SUMMARY+="πŸ–₯️ $HOST
+❌ Failed to connect or run checks.
+"
+ continue
+}
+
+ IFS='|' read -r H MEM SWAP_MB SWAP_HUMAN DISK LOAD1 UPTIME_STATUS PGSQL_STATUS MASTODON_STATUS <<< "$DATA"
+    ROLES="${HOST_ROLES[$HOST]}"
+ ALERTS=""
+
+ # === Smart Swap Alert: only if memory is low OR system is under load ===
+ if [[ -n "$SWAP_MB" && "$SWAP_MB" =~ ^[0-9]+$ && "$SWAP_MB" -gt "$SWAP_LIMIT_MB" ]]; then
+ MEM_MB=$(echo "$MEM" | awk '{printf "%d", $1 * 1024}' 2>/dev/null)
+ LOAD_HIGH=$(awk "BEGIN {print ($LOAD1 > $LOAD_LIMIT) ? 1 : 0}")
+ if [[ "$MEM_MB" -lt 1024 || "$LOAD_HIGH" -eq 1 ]]; then
+ ALERTS+="⚠️ HIGH SWAP ($SWAP_HUMAN)\n"
+ fi
+ fi
+
+ # === Load Alert ===
+ if [[ -n "$LOAD1" ]]; then
+ LOAD_HIGH=$(awk "BEGIN {print ($LOAD1 > $LOAD_LIMIT) ? 1 : 0}")
+ [ "$LOAD_HIGH" -eq 1 ] && ALERTS+="⚠️ HIGH LOAD ($LOAD1)\n"
+ fi
+
+ # === Functional Service Status Alerts ===
+ [[ "$ROLES" == *"postgres"* && "$PGSQL_STATUS" != "active" ]] && ALERTS+="❌ PostgreSQL not ready\n"
+ [[ "$ROLES" == *"mastodon"* && "$MASTODON_STATUS" != "active" ]] && ALERTS+="❌ Mastodon API check failed\n"
+
+ ALERTS_MSG=""
+ [ -n "$ALERTS" ] && ALERTS_MSG="🚨 ALERTS:
+$ALERTS"
+
+ SUMMARY+="πŸ–₯️ $H
+β€’ Mem: $MEM
+β€’ Swap: $SWAP_HUMAN
+β€’ Disk: $DISK
+β€’ Load: ${LOAD1:-Unavailable}
+β€’ Uptime: $UPTIME_STATUS
+β€’ Roles: ${ROLES:-none}
+$ALERTS_MSG
+"
+done
+
+# === Krang Clock Sync Check ===
+NTP_RESULT=$(ntpdate -q time.google.com 2>&1)
+OFFSET=$(echo "$NTP_RESULT" | awk '/offset/ {print $10}')
+if [[ "$OFFSET" =~ ^-?[0-9.]+$ ]]; then
+ OFFSET_MS=$(awk "BEGIN {printf \"%.0f\", $OFFSET * 1000}")
+ if (( OFFSET_MS > 500 || OFFSET_MS < -500 )); then
+ CORRECTION=$(ntpdate -u time.google.com 2>&1)
+ SUMMARY+="πŸ› οΈ Auto-corrected Krang clock via ntpdate: $CORRECTION
+"
+ SUMMARY+="πŸ•°οΈ Krang Clock Offset: ${OFFSET_MS}ms β€” ⚠️ OUT OF SYNC
+"
+ else
+ SUMMARY+="πŸ•°οΈ Krang Clock Offset: ${OFFSET_MS}ms β€” βœ… SYNCHRONIZED
+"
+ fi
+else
+ SUMMARY+="πŸ•°οΈ Krang Clock Check: ❌ FAILED to retrieve offset.
+"
+fi
+
+# === Log & Send ===
+echo -e "$SUMMARY" > "$LOGFILE"
+
+curl -s -X POST https://api.telegram.org/bot$BOT_TOKEN/sendMessage \
+ -d chat_id="$CHAT_ID" \
+ -d text="$SUMMARY"
diff --git a/db2_backup.sh b/db2_backup.sh
new file mode 100755
index 0000000..3e399bb
--- /dev/null
+++ b/db2_backup.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+#
+# Script Name: db2_backup.sh
+# Description: Creates a raw base backup of PostgreSQL on zcluster.technodrome2 using pg_basebackup in directory mode.
+# Transfers the backup to The Vault’s ZFS dataset and snapshots it for long-term retention.
+# Requirements: pg_basebackup, SSH access, rclone or rsync, ZFS dataset available at destination
+# Usage: ./db2_zfs_backup.sh
+# Author: Doc @ Genesis Ops
+# Date: 2025-05-12
+#
+
+### CONFIGURATION ###
+SOURCE_SERVER="zcluster.technodrome2.sshjunkie.com"
+SOURCE_USER="doc"
+PG_USER="postgres"
+SOURCE_BASE_DIR="/tmp/db2_backup" # On the remote node
+BACKUP_LABEL="$(date +%Y%m%d%H%M)"
+REMOTE_BACKUP_DIR="$SOURCE_BASE_DIR/$BACKUP_LABEL"
+
+# Remote source rclone config (optional)
+SOURCE_REMOTE="technodrome2"
+
+# Local destination
+DEST_DATASET="vaultpool/postgresql/db2" # Adjust as needed
+DEST_MOUNT="/nexus/postgresql/db2" # Must be mountpoint for $DEST_DATASET
+FULL_DEST="$DEST_MOUNT/$BACKUP_LABEL"
+
+#####################
+
+echo "πŸš€ Starting ZFS-aware base backup for db2 from $SOURCE_SERVER..."
+
+# Ensure pg_basebackup will run cleanly
+ssh $SOURCE_USER@$SOURCE_SERVER "sudo mkdir -p '$REMOTE_BACKUP_DIR' && \
+ sudo pg_basebackup -h localhost -D '$REMOTE_BACKUP_DIR' -U $PG_USER -Fp -R -X fetch -P"
+
+if [[ $? -ne 0 ]]; then
+ echo "❌ pg_basebackup failed on $SOURCE_SERVER."
+ exit 1
+fi
+
+echo "πŸ“¦ Backup created on $SOURCE_SERVER at $REMOTE_BACKUP_DIR"
+
+# Pull the backup using rsync (preserves structure + timestamps)
+echo "πŸ”„ Syncing backup to The Vault at $FULL_DEST..."
+mkdir -p "$FULL_DEST"
+rsync -avz --progress $SOURCE_USER@$SOURCE_SERVER:"$REMOTE_BACKUP_DIR/" "$FULL_DEST/"
+
+if [[ $? -ne 0 ]]; then
+ echo "❌ rsync transfer failed!"
+ exit 1
+fi
+
+# Snapshot the full ZFS backup dataset
+SNAPSHOT_NAME="${DEST_DATASET}@${BACKUP_LABEL}"
+echo "πŸ“Έ Creating ZFS snapshot: $SNAPSHOT_NAME"
+zfs snapshot "$SNAPSHOT_NAME"
+
+if [[ $? -eq 0 ]]; then
+ echo "βœ… Snapshot $SNAPSHOT_NAME created successfully."
+else
+ echo "❌ Snapshot creation failed."
+ exit 1
+fi
+
+# Optional: Clean up the remote backup dir
+echo "🧹 Cleaning up temporary backup on $SOURCE_SERVER..."
+ssh $SOURCE_USER@$SOURCE_SERVER "sudo rm -rf '$REMOTE_BACKUP_DIR'"
+
+echo "πŸŽ‰ Backup and ZFS snapshot complete. Stored in $FULL_DEST"
diff --git a/deldirectories.sh b/deldirectories.sh
new file mode 100755
index 0000000..f24fd47
--- /dev/null
+++ b/deldirectories.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+find /home/mastodon/backup/mastodon_backup* -mindepth 1 -type d -exec rm -rf {} +
diff --git a/detonate.sh b/detonate.sh
new file mode 100755
index 0000000..5defdae
--- /dev/null
+++ b/detonate.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# This script finds and blows away directory landmines in a MinIO-mounted filesystem
+# where files are supposed to go but directories already exist. Use with caution.
+
+LOG="/tmp/minio_detonation.log"
+ERROR_LOG="/tmp/rclonemasto-dump.log"
+TARGET_BASE="/assets/minio-data/mastodon"
+
+echo "[*] Scanning for blocking directories... πŸ’£" | tee "$LOG"
+
+grep 'is a directory' "$ERROR_LOG" | \
+awk -F': open ' '{print $2}' | \
+sed 's/: is a directory//' | \
+sort -u | while read -r bad_path; do
+ if [ -d "$bad_path" ]; then
+ echo "[πŸ’₯] Nuking: $bad_path" | tee -a "$LOG"
+ rm -rf "$bad_path"
+ else
+ echo "[βœ”οΈ] Skipped (not a dir): $bad_path" | tee -a "$LOG"
+ fi
+done
+
+echo "[βœ…] All blocking directories removed. Re-run rclone and finish the war." | tee -a "$LOG"
diff --git a/disk_mitigator.sh b/disk_mitigator.sh
new file mode 100755
index 0000000..11f5242
--- /dev/null
+++ b/disk_mitigator.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# disk space mitigation tool for linux hosts
+
+# === Prompt for Target ===
+read -p "Enter SSH username: " USERNAME
+read -p "Enter server hostname (e.g. krang.internal): " HOSTNAME
+
+REMOTE="$USERNAME@$HOSTNAME"
+
+# === Alert Config (local alerts) ===
+TELEGRAM_BOT_TOKEN="8178867489:AAH0VjN7VnZSCIWasSz_y97iBLLjPJA751k"
+TELEGRAM_CHAT_ID="1559582356"
+MASTODON_INSTANCE="https://chatwithus.live"
+MASTODON_TOKEN="rimxBLi-eaJAcwagkmoj6UoW7Lc473tQY0cOM041Euw"
+TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
+
+# === Remote Disk Check + Cleanup Script ===
+REMOTE_SCRIPT=$(cat << 'EOF'
+#!/bin/bash
+THRESHOLD_PERCENT=15
+HOST=$(hostname)
+ALERTED=false
+
+df -h --output=target,pcent | tail -n +2 | while read -r mount usage; do
+ percent=$(echo "$usage" | tr -d '%')
+ if [ "$percent" -ge $((100 - THRESHOLD_PERCENT)) ]; then
+ echo "[!] $HOST: Low space on $mount ($usage used). Running cleanup..."
+
+ apt-get clean -y > /dev/null 2>&1
+ journalctl --vacuum-time=3d > /dev/null 2>&1
+ docker system prune -af --volumes > /dev/null 2>&1
+ rm -rf /tmp/* /var/tmp/*
+
+ echo "[βœ“] $HOST: Cleanup complete for $mount"
+ else
+ echo "[OK] $HOST: $mount has enough space ($usage used)"
+ fi
+done
+EOF
+)
+
+# === Run Remote Script via SSH ===
+echo "[*] Connecting to $REMOTE..."
+OUTPUT=$(ssh "$REMOTE" "$REMOTE_SCRIPT")
+
+# === Log and Notify ===
+echo "[$TIMESTAMP] === Remote Disk Check on $HOSTNAME ===" >> /var/log/disk_mitigator.log
+echo "$OUTPUT" >> /var/log/disk_mitigator.log
+
+# Alert if low space was found
+if echo "$OUTPUT" | grep -q "\[!\]"; then
+ MSG="⚠️ Disk cleanup triggered on $HOSTNAME via Krang.\n\n$OUTPUT"
+
+ # Send alerts
+ curl -s -X POST "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/sendMessage" \
+ -d chat_id="$TELEGRAM_CHAT_ID" \
+ -d text="$MSG" > /dev/null
+
+ curl -s -X POST "$MASTODON_INSTANCE/api/v1/statuses" \
+ -H "Authorization: Bearer $MASTODON_TOKEN" \
+ -d "status=$MSG" \
+ -d "visibility=unlisted" > /dev/null
+fi
+
+echo "[βœ“] Done. Output logged and alerts (if any) sent."
diff --git a/do_the_needful.sh b/do_the_needful.sh
new file mode 100755
index 0000000..b927847
--- /dev/null
+++ b/do_the_needful.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# === CONFIG ===
+SWAPPINESS_LEVEL=10
+LOG_CLEANUP_LIMIT_DAYS=14
+APACHE_SERVICES=("apache2" "httpd")
+HOST=$(hostname)
+TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
+
+# === Telegram Config ===
+BOT_TOKEN="8178867489:AAH0VjN7VnZSCIWasSz_y97iBLLjPJA751k"
+CHAT_ID="1559582356"
+
+echo "πŸ”§ [$HOST] Starting health cleanup..."
+
+# 1. Tune swappiness
+echo "β†’ Setting vm.swappiness to $SWAPPINESS_LEVEL"
+echo "vm.swappiness=$SWAPPINESS_LEVEL" | tee /etc/sysctl.d/99-swappiness.conf > /dev/null
+sysctl -p /etc/sysctl.d/99-swappiness.conf > /dev/null
+
+# 2. Disable Apache if not needed
+apache_disabled=""
+for svc in "${APACHE_SERVICES[@]}"; do
+ if systemctl list-units --type=service --all | grep -q "$svc"; then
+ echo "β†’ Apache service '$svc' detected"
+ if ! ss -tulpn | grep -q ":80"; then
+ echo " πŸ”• Apache appears idle. Disabling..."
+ systemctl disable --now "$svc"
+ apache_disabled="yes"
+ else
+ echo " ⚠️ Apache is running and serving. Skipping stop."
+ fi
+ fi
+done
+
+# 3. Clean logs older than X days
+echo "β†’ Cleaning logs older than $LOG_CLEANUP_LIMIT_DAYS days in /var/log"
+find /var/log -type f -name "*.log" -mtime +$LOG_CLEANUP_LIMIT_DAYS -exec rm -f {} \;
+
+# 4. Summary Info
+MEM=$(free -h | grep Mem | awk '{print $4 " free"}')
+SWAP=$(free -h | grep Swap | awk '{print $3 " used"}')
+DISK=$(df -h / | awk 'NR==2 {print $4 " free"}')
+LOAD=$(uptime | awk -F'load average:' '{print $2}' | xargs)
+
+MSG="βœ… [$HOST] Cleanup completed at $TIMESTAMP
+Memory: $MEM
+Swap: $SWAP
+Disk: $DISK
+Load: $LOAD"
+
+if [ "$apache_disabled" == "yes" ]; then
+ MSG="$MSG
+Apache was detected and disabled βœ…"
+fi
+
+# 5. Send Telegram message
+curl -s -X POST https://api.telegram.org/bot$BOT_TOKEN/sendMessage \
+ -d chat_id="$CHAT_ID" \
+ -d text="$MSG"
diff --git a/dotheneedfuleverywhere.sh b/dotheneedfuleverywhere.sh
new file mode 100755
index 0000000..7926b78
--- /dev/null
+++ b/dotheneedfuleverywhere.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# === CONFIG ===
+SCRIPT_PATH="/usr/local/bin/do_the_needful.sh"
+REMOTE_USER="doc"
+BOT_TOKEN="8178867489:AAH0VjN7VnZSCIWasSz_y97iBLLjPJA751k"
+CHAT_ID="1559582356"
+TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
+
+SERVERS=(
+ thevault.sshjunkie.com
+ zcluster.technodrome1.sshjunkie.com
+ zcluster.technodrome2.sshjunkie.com
+ shredder.sshjunkie.com
+ chatwithus.live
+)
+
+SUMMARY="πŸ€– Krang Deployment Report - $TIMESTAMP\n\n"
+FAILURES=0
+
+for HOST in "${SERVERS[@]}"; do
+ echo "πŸš€ Deploying to $HOST..."
+
+ # Upload script to temp location
+ scp "$SCRIPT_PATH" "$REMOTE_USER@$HOST:/tmp/do_the_needful.sh"
+ if [ $? -ne 0 ]; then
+ SUMMARY+="❌ $HOST: SCP failed\n"
+ ((FAILURES++))
+ continue
+ fi
+
+ # Move into place and execute
+ ssh "$REMOTE_USER@$HOST" "sudo install -m 755 /tmp/do_the_needful.sh $SCRIPT_PATH && sudo $SCRIPT_PATH"
+ if [ $? -ne 0 ]; then
+ SUMMARY+="❌ $HOST: sudo execution failed\n"
+ ((FAILURES++))
+ else
+ SUMMARY+="βœ… $HOST: cleaned successfully\n"
+ fi
+
+ echo "----------------------------------"
+done
+
+# === Send Telegram Summary ===
+FINAL_STATUS="🚨 Some hosts failed."; [ "$FAILURES" -eq 0 ] && FINAL_STATUS="βœ… All hosts completed."
+
+curl -s -X POST https://api.telegram.org/bot$BOT_TOKEN/sendMessage \
+ -d chat_id="$CHAT_ID" \
+ -d text="$FINAL_STATUS\n\n$SUMMARY"
diff --git a/dr_mirror_to_linode.sh b/dr_mirror_to_linode.sh
new file mode 100755
index 0000000..b15ad97
--- /dev/null
+++ b/dr_mirror_to_linode.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# === CONFIG ===
+ZFS_MOUNT="/assets"
+LINODE_ALIAS="linode"
+KRANG_BOT_TOKEN="your-bot-token-here"
+CHAT_ID="your-chat-id-here"
+MINIO_SERVICE="minio"
+LOG_DIR="/home/doc/genesisdr" # <- customize this!
+
+# === SETUP ===
+mkdir -p "$LOG_DIR"
+TIMESTAMP=$(date '+%Y-%m-%d_%H-%M-%S')
+LOG_FILE="$LOG_DIR/mirror_$TIMESTAMP.log"
+
+# === START LOGGING ===
+exec > >(tee -a "$LOG_FILE") 2>&1
+
+echo "πŸ” Genesis DR MinIO Mirror Log β€” $TIMESTAMP"
+echo "Log file: $LOG_FILE"
+echo "Starting DR mirror from $ZFS_MOUNT to $LINODE_ALIAS"
+echo "-------------------------------------------"
+
+# === SYNC ===
+mc mirror --overwrite "$ZFS_MOUNT" "$LINODE_ALIAS" --quiet
+MIRROR_STATUS=$?
+
+if [[ $MIRROR_STATUS -ne 0 ]]; then
+ echo "❌ Mirror failed with exit code $MIRROR_STATUS"
+ curl -s -X POST https://api.telegram.org/bot$KRANG_BOT_TOKEN/sendMessage \
+ -d chat_id="$CHAT_ID" \
+ -d text="❌ MinIO DR mirror to Linode FAILED. MinIO remains offline. Log: $LOG_FILE"
+ exit 1
+fi
+
+echo "βœ… Mirror complete. Starting MinIO..."
+systemctl start "$MINIO_SERVICE"
+
+curl -s -X POST https://api.telegram.org/bot$KRANG_BOT_TOKEN/sendMessage \
+ -d chat_id="$CHAT_ID" \
+ -d text="βœ… MinIO DR mirror to Linode completed successfully. MinIO is online. Log: $LOG_FILE"
+
+echo "πŸš€ All done."
+echo "-------------------------------------------"
diff --git a/dr_telegram_alert.sh b/dr_telegram_alert.sh
new file mode 100755
index 0000000..aaccda6
--- /dev/null
+++ b/dr_telegram_alert.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# Telegram Bot Token and Chat ID
+TELEGRAM_BOT_TOKEN="8178867489:AAH0VjN7VnZSCIWasSz_y97iBLLjPJA751k"
+TELEGRAM_CHAT_ID="1559582356"
+
+# Function to send Telegram message
+send_telegram_message() {
+ local message="$1"
+ curl -s -X POST "https://api.telegram.org/bot$TELEGRAM_BOT_TOKEN/sendMessage" \
+ -d chat_id=$TELEGRAM_CHAT_ID \
+ -d text="$message" > /dev/null
+}
+
+# Check if it's the first of the month and send a reminder
+current_day=$(date +%d)
+if [ "$current_day" -eq "01" ]; then
+ send_telegram_message "Reminder: It's the 1st of the month! Please run a disaster recovery drill and test restore on all datasets."
+fi
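Because the script does its own first-of-the-month check, it is meant to fire daily and stay silent the other thirty-odd days. A hypothetical crontab line (path assumed):

# Run daily at 09:00; the script itself decides whether today is the 1st.
0 9 * * * /home/doc/genesis-tools/dr_telegram_alert.sh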
diff --git a/fix_queue.sh b/fix_queue.sh
new file mode 100755
index 0000000..77580b9
--- /dev/null
+++ b/fix_queue.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# ===== CONFIG =====
+USERNAME="$1"
+RAILS_ENV=production
+cd /home/mastodon/live || exit 1
+
+if [[ -z "$USERNAME" ]]; then
+ echo "❌ Usage: $0 <username>"
+ exit 1
+fi
+
+echo "πŸ” Looking up account ID for @$USERNAME..."
+ACCOUNT_ID=$(sudo -u mastodon -H bundle exec rails runner "
+acct = Account.find_by(username: '$USERNAME')
+puts acct&.id || 'not_found'
+")
+
+if [[ "$ACCOUNT_ID" == "not_found" ]]; then
+ echo "❌ Account @$USERNAME not found."
+ exit 1
+fi
+
+echo "πŸ—‘οΈ Deleting Redis cache for home timeline..."
+sudo -u mastodon -H redis-cli DEL feed:home:$ACCOUNT_ID
+
+echo "🧱 Rebuilding timeline from followed accounts..."
+sudo -u mastodon -H bundle exec rails runner "
+acct = Account.find_by(username: '$USERNAME')
+if acct
+ FeedInsertWorker.push_bulk(acct.following.pluck(:id)) do |follower_id|
+ [follower_id, acct.id]
+ end
+ puts 'βœ… Timeline repopulation enqueued.'
+end
+"
+
+echo "βœ… Done. Home timeline for @$USERNAME reset and rebuilt."
diff --git a/fix_queue2.sh b/fix_queue2.sh
new file mode 100755
index 0000000..da69102
--- /dev/null
+++ b/fix_queue2.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# ===== CONFIG =====
+USERNAME="$1"
+RAILS_ENV=production
+cd /home/mastodon/live || exit 1
+
+if [[ -z "$USERNAME" ]]; then
+ echo "❌ Usage: $0 <username>"
+ exit 1
+fi
+
+# Set full path for bundle
+BUNDLE_PATH="/home/mastodon/.rbenv/shims/bundle"
+
+# Set RAILS_ENV for the script execution
+export RAILS_ENV=production
+
+echo "πŸ” Looking up account ID for @$USERNAME..."
+ACCOUNT_ID=$(sudo -u mastodon -H $BUNDLE_PATH exec rails runner "
+acct = Account.find_by(username: '$USERNAME')
+puts acct&.id || 'not_found'
+")
+
+if [[ "$ACCOUNT_ID" == "not_found" ]]; then
+ echo "❌ Account @$USERNAME not found."
+ exit 1
+fi
+
+echo "πŸ—‘οΈ Deleting Redis cache for home timeline..."
+sudo -u mastodon -H redis-cli DEL feed:home:$ACCOUNT_ID
+
+echo "🧱 Rebuilding timeline from followed accounts..."
+sudo -u mastodon -H $BUNDLE_PATH exec rails runner "
+acct = Account.find_by(username: '$USERNAME')
+if acct
+ FeedInsertWorker.push_bulk(acct.following.pluck(:id)) do |follower_id|
+ [follower_id, acct.id]
+ end
+ puts 'βœ… Timeline repopulation enqueued.'
+end
+"
+
+echo "βœ… Done. Home timeline for @$USERNAME reset and rebuilt."
diff --git a/fix_queue3.sh b/fix_queue3.sh
new file mode 100755
index 0000000..c28d42a
--- /dev/null
+++ b/fix_queue3.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+# ===== CONFIG =====
+USERNAME="$1"
+RAILS_ENV=production
+cd /home/mastodon/live || exit 1
+
+if [[ -z "$USERNAME" ]]; then
+ echo "❌ Usage: $0 <username>"
+ exit 1
+fi
+
+# Set full path for bundle
+BUNDLE_PATH="/home/mastodon/.rbenv/shims/bundle"
+
+echo "πŸ” Looking up account ID for @$USERNAME..."
+ACCOUNT_ID=$(sudo -u mastodon -E env RAILS_ENV=production $BUNDLE_PATH exec rails runner "
+acct = Account.find_by(username: '$USERNAME')
+puts acct&.id || 'not_found'
+")
+
+if [[ "$ACCOUNT_ID" == "not_found" ]]; then
+ echo "❌ Account @$USERNAME not found."
+ exit 1
+fi
+
+echo "πŸ—‘οΈ Deleting Redis cache for home timeline..."
+sudo -u mastodon -E env RAILS_ENV=production redis-cli DEL feed:home:$ACCOUNT_ID
+
+echo "🧱 Rebuilding timeline from followed accounts..."
+sudo -u mastodon -E env RAILS_ENV=production $BUNDLE_PATH exec rails runner "
+acct = Account.find_by(username: '$USERNAME')
+if acct
+ FeedInsertWorker.push_bulk(acct.following.pluck(:id)) do |follower_id|
+ [follower_id, acct.id]
+ end
+ puts 'βœ… Timeline repopulation enqueued.'
+end
+"
+
+echo "βœ… Done. Home timeline for @$USERNAME reset and rebuilt."
diff --git a/fixsudoerseverywhere.sh b/fixsudoerseverywhere.sh
new file mode 100755
index 0000000..acf282e
--- /dev/null
+++ b/fixsudoerseverywhere.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# === CONFIG ===
+REMOTE_USER="doc"
+SERVERS=(
+ thevault.sshjunkie.com
+ zcluster.technodrome1.sshjunkie.com
+ zcluster.technodrome2.sshjunkie.com
+ shredder.sshjunkie.com
+ chatwithus.live
+)
+
+SUDO_LINE="doc ALL=(ALL) NOPASSWD:ALL"
+
+# === Execution ===
+for HOST in "${SERVERS[@]}"; do
+ echo "πŸ”§ Fixing sudoers on $HOST..."
+
+ ssh "$REMOTE_USER@$HOST" "sudo bash -c '
+ cp /etc/sudoers /etc/sudoers.bak_krang &&
+ grep -q \"$SUDO_LINE\" /etc/sudoers ||
+ echo \"$SUDO_LINE\" >> /etc/sudoers &&
+ visudo -c >/dev/null
+ '"
+
+ if ssh "$REMOTE_USER@$HOST" "sudo -n true"; then
+ echo "βœ… $HOST: sudo access confirmed"
+ else
+ echo "❌ $HOST: sudo access STILL broken"
+ fi
+
+ echo "----------------------------------"
+done
diff --git a/freezer.sh b/freezer.sh
new file mode 100755
index 0000000..17fa86d
--- /dev/null
+++ b/freezer.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Find all venvs, freeze their packages to requirements.txt
+
+BASE_DIR="$HOME" # Or wherever your projects are
+
+echo "Scanning for venvs under $BASE_DIR ..."
+
+find "$BASE_DIR" -type f -name "pyvenv.cfg" 2>/dev/null | while read cfg; do
+ venv_dir="$(dirname "$cfg")"
+ reqfile="$venv_dir/requirements.txt"
+ echo "πŸ”’ Freezing $venv_dir β†’ $reqfile"
+ "$venv_dir/bin/python" -m pip freeze > "$reqfile"
+ if [ $? -eq 0 ]; then
+ echo "βœ… Done: $reqfile"
+ else
+ echo "❌ Failed to freeze $venv_dir"
+ fi
+done
+
+echo "All venvs processed!"
diff --git a/freezermove.sh b/freezermove.sh
new file mode 100755
index 0000000..320278b
--- /dev/null
+++ b/freezermove.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+SRC_ROOT="/home/doc"
+TARGET_DIR="/home/doc/genesis-tools/venvrequirements"
+DRY_RUN=0
+
+if [[ "$1" == "--dry-run" ]]; then
+ DRY_RUN=1
+ echo "Dry run mode enabled: No files will be created or copied."
+fi
+
+echo "Scanning for venvs in $SRC_ROOT ..."
+
+found_any=0
+for dir in "$SRC_ROOT"/*/; do
+ venv_name=$(basename "$dir")
+ req_file="${dir}requirements.txt"
+ dest_file="$TARGET_DIR/requirements_${venv_name}.txt"
+
+ # Only proceed if it's a directory and requirements.txt exists
+ if [[ -d "$dir" && -f "$req_file" ]]; then
+ found_any=1
+ echo "Found: $req_file"
+ echo "β†’ Would copy to: $dest_file"
+
+ if [[ -f "$dest_file" ]]; then
+ echo " [SKIP] $dest_file already exists. Skipping."
+ else
+ if [[ "$DRY_RUN" -eq 1 ]]; then
+ echo " [DRY RUN] Would copy $req_file β†’ $dest_file"
+ else
+ cp "$req_file" "$dest_file"
+ echo " [OK] Copied $req_file β†’ $dest_file"
+ fi
+ fi
+ echo ""
+ fi
+done
+
+if [[ "$found_any" -eq 0 ]]; then
+ echo "No requirements.txt files found in $SRC_ROOT!"
+fi
+
+if [[ "$DRY_RUN" -eq 1 ]]; then
+ echo "All requirements processed. (Dry run mode)"
+else
+ echo "All requirements copied and organized."
+fi
diff --git a/genesis_agg.sh b/genesis_agg.sh
new file mode 100755
index 0000000..6971234
--- /dev/null
+++ b/genesis_agg.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# Configuration
+TG_BOT_TOKEN="${TG_BOT_TOKEN:-7277705363:AAGSw5Pmcbf7IsSyZKMqU6PJ4VsVwdKLRH0}"
+TG_CHAT_ID="${TG_CHAT_ID:-1559582356}"
+
+declare -A NODES
+NODES=(
+ ["genesis-east"]="root@198.74.58.14"
+ ["genesis-midwest"]="root@45.56.126.90"
+ ["genesis-west"]="root@172.232.172.119"
+)
+
+REMOTE_SCRIPT="/root/genesis_routewatch.sh"
+CRITICAL=0
+OUTPUT=""
+
+send_telegram_alert() {
+ local message="$1"
+ curl -s -X POST "https://api.telegram.org/bot${TG_BOT_TOKEN}/sendMessage" \
+ -d chat_id="${TG_CHAT_ID}" \
+ -d parse_mode="Markdown" \
+ -d text="$message" > /dev/null
+}
+
+for region in "${!NODES[@]}"; do
+ HOST="${NODES[$region]}"
+ echo "🌐 Probing $region ($HOST)..."
+
+ OUTPUT_SEGMENT=$(ssh -o ConnectTimeout=10 "$HOST" "bash $REMOTE_SCRIPT" 2>&1)
+ OUTPUT+="πŸ›°οΈ $region Output:\n$OUTPUT_SEGMENT\n\n"
+
+ if echo "$OUTPUT_SEGMENT" | grep -q "Status: CRITICAL"; then
+ CRITICAL=1
+ fi
+done
+
+# Display results
+echo -e "$OUTPUT"
+
+if [ $CRITICAL -eq 1 ]; then
+ ALERT_MSG="🚨 *GenesisRouteWatch Multi-Region Alert* 🚨\n\n$OUTPUT"
+ send_telegram_alert "$ALERT_MSG"
+fi
diff --git a/genesis_agg1.sh b/genesis_agg1.sh
new file mode 100755
index 0000000..4bb31aa
--- /dev/null
+++ b/genesis_agg1.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+# === Config ===
+declare -A NODES=(
+ [genesis-west]="root@172.232.172.119"
+ [genesis-east]="root@198.74.58.14"
+ [genesis-midwest]="root@45.56.126.90"
+)
+
+TELEGRAM_BOT_TOKEN="7277705363:AAGSw5Pmcbf7IsSyZKMqU6PJ4VsVwdKLRH0"
+TELEGRAM_CHAT_ID="1559582356"
+
+# === Functions ===
+
+send_telegram() {
+ local msg="$1"
+ curl -s -X POST "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/sendMessage" \
+ -d chat_id="${TELEGRAM_CHAT_ID}" \
+ -d text="$msg" \
+ -d parse_mode="Markdown"
+}
+
+# === Main ===
+
+alert_text="*GenesisRouteWatch Alert!*\n"
+issue_found=0
+
+for region in "${!NODES[@]}"; do
+ host="${NODES[$region]}"
+ echo "🌐 Probing $region ($host)..."
+ output=$(ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no $host "/root/genesis_routewatch.sh" 2>/dev/null)
+
+ echo "πŸ›°οΈ $region Output:"
+ echo "$output"
+ echo
+
+ # Save raw report
+ full_report+="πŸ›°οΈ *$region*:\n\`\`\`\n$output\n\`\`\`\n\n"
+
+ # Detect issues
+ if echo "$output" | grep -q "Status: CRITICAL"; then
+        alert_text+="*$region* path degraded!\n"
+ issue_found=1
+ fi
+done
+
+# Send alert only if something's wrong
+if [[ $issue_found -eq 1 ]]; then
+ send_telegram "$alert_text"
+else
+ echo "βœ… All paths healthy. No alert sent."
+fi
diff --git a/genesis_check.sh b/genesis_check.sh
new file mode 100755
index 0000000..d1ab702
--- /dev/null
+++ b/genesis_check.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+mkdir -p /var/log/genesis_uptime
+
+declare -A services=(
+ [radio]="https://genesis-radio.net"
+ [mastodon]="https://chatwithus.live"
+ [minio]="https://console.sshjunkie.com"
+ [azura]="https://portal.genesishostingtechnologies.com"
+ [teamtalk]="http://tt.themediahub.org"
+ [directadmin]="https://da.genesishostingtechnologies.com"
+)
+
+timestamp=$(date -u +"%Y-%m-%dT%H:%M:%S")
+
+for service in "${!services[@]}"
+do
+    url=${services[$service]}
+    # --fail counts HTTP 4xx/5xx as down instead of logging them as up
+    if curl --head --silent --fail --max-time 10 "$url" >/dev/null; then
+        echo "$timestamp,up" >> "/var/log/genesis_uptime/$service.log"
+    else
+        echo "$timestamp,down" >> "/var/log/genesis_uptime/$service.log"
+    fi
+done
diff --git a/genesis_sync_progress.sh b/genesis_sync_progress.sh
new file mode 100755
index 0000000..8735e0e
--- /dev/null
+++ b/genesis_sync_progress.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# GenesisSync Progress Tracker - No hangs, no nonsense
+
+SOURCE="/mnt/raid5/minio-data/linodeassets"
+DEST="/assets/minio-data/mastodon"
+LOG="/root/genesis_sync_progress.log"
+INTERVAL=300 # in seconds
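+
+# Meant to run long-lived; a detached screen session (assumed setup) works well:
+#   screen -dmS genesis_sync /root/genesis_sync_progress.sh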
+
+mkdir -p "$(dirname "$LOG")"
+
+while true; do
+    TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
+
+    SRC_COUNT=$(rclone size "$SOURCE" --json | jq .objects)
+    DST_COUNT=$(rclone size "$DEST" --json | jq .objects)
+
+    # Guard against empty output, jq's literal "null", and division by zero
+    if [[ -z "$SRC_COUNT" || -z "$DST_COUNT" || "$SRC_COUNT" == "null" || "$DST_COUNT" == "null" || "$SRC_COUNT" -eq 0 ]]; then
+        echo "[$TIMESTAMP] Error getting file counts. Retrying in $INTERVAL seconds..." | tee -a "$LOG"
+    else
+        PERCENT=$(( DST_COUNT * 100 / SRC_COUNT ))
+        echo "[$TIMESTAMP] Synced: $DST_COUNT / $SRC_COUNT ($PERCENT%)" | tee -a "$LOG"
+    fi
+
+    sleep "$INTERVAL"
+done
diff --git a/get_telegram_id.sh b/get_telegram_id.sh
new file mode 100755
index 0000000..5e57d23
--- /dev/null
+++ b/get_telegram_id.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+read -p "Enter your Telegram Bot Token: " TOKEN
+
+echo "Fetching recent updates..."
+curl -s "https://api.telegram.org/bot$TOKEN/getUpdates" | jq '.result[].message.chat | {id, type, title, username, first_name}'
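+
+# Example output shape (illustrative; message your bot first so getUpdates
+# has something to return):
+#   { "id": 123456789, "type": "private", "title": null, "username": "example", "first_name": "Example" }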
diff --git a/giteapushv3.sh b/giteapushv3.sh
new file mode 100755
index 0000000..c2eb40f
--- /dev/null
+++ b/giteapushv3.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+# Genesis Radio Git Auto-Push
+# With Auto-Retry if Push Fails
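+#
+# Example (assumed) cron entry for unattended pushes:
+#   0 * * * * /home/doc/genesis-tools/giteapushv3.sh >> /home/doc/giteapush.log 2>&1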
+
+# Move to the top of the git repo automatically
+cd "$(git rev-parse --show-toplevel)" || { echo "❌ Not inside a Git repo. Exiting."; exit 1; }
+
+# Log the current location
+echo "πŸ“‚ Working in $(pwd)"
+
+# Stage all changes (new, modified, deleted)
+git add -A
+
+# Check if there's anything to commit
+if ! git diff --cached --quiet; then
+ TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S")
+ git commit -m "Auto-commit from giteapush.sh at $TIMESTAMP"
+
+ echo "πŸ“‘ Attempting to push to origin/main..."
+
+ # Push with retry up to 3 times
+ tries=0
+ max_tries=3
+ until git push origin main; do
+ tries=$((tries+1))
+ if [ "$tries" -ge "$max_tries" ]; then
+ echo "❌ Push failed after $max_tries attempts. Manual intervention needed."
+ exit 1
+ fi
+ echo "⚠️ Push failed. Retrying ($tries/$max_tries) in 5 seconds..."
+ sleep 5
+ done
+
+ echo "βœ… Changes committed and pushed successfully at $TIMESTAMP"
+else
+ echo "ℹ️ No changes to commit."
+fi
+
+# Always show repo status at the end
+echo "πŸ“‹ Repo status:"
+git status -sb
diff --git a/hardenit.sh b/hardenit.sh
new file mode 100755
index 0000000..4859bd0
--- /dev/null
+++ b/hardenit.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+# harden_pyapps_box.sh - Secure the Genesis pyapps VM
+# Run as root or with sudo
+
+LOG_FILE="/var/log/genesis_pyapps_hardening.log"
+DATE=$(date '+%Y-%m-%d %H:%M:%S')
+echo -e "\nπŸ” Genesis pyapps VM Hardening - $DATE\n=====================================" | tee -a "$LOG_FILE"
+
+# 1. Lock unused system accounts
+LOCK_USERS=(daemon bin sys sync games man lp mail news uucp proxy www-data backup list irc gnats nobody systemd-network systemd-resolve systemd-timesync messagebus syslog _apt tss uuidd tcpdump usbmux sshd landscape pollinate fwupd-refresh dnsmasq cockpit-ws cockpit-wsinstance)
+for user in "${LOCK_USERS[@]}"; do
+ if id "$user" &>/dev/null; then
+ usermod -s /usr/sbin/nologin "$user" && echo "[+] Set nologin shell for $user" | tee -a "$LOG_FILE"
+ passwd -l "$user" &>/dev/null && echo "[+] Locked password for $user" | tee -a "$LOG_FILE"
+ fi
+done
+
+# 2. Enforce password policy for doc
+chage -M 90 -W 14 -I 7 doc && echo "[+] Set password expiration policy for doc" | tee -a "$LOG_FILE"
+
+# 3. SSH hardening
+sed -i 's/^#*PermitRootLogin.*/PermitRootLogin no/' /etc/ssh/sshd_config
+sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
+systemctl restart sshd && echo "[+] SSH config hardened and restarted" | tee -a "$LOG_FILE"
+
+# 4. Install and configure Fail2ban
+apt-get install -y fail2ban
+cat <<EOF > /etc/fail2ban/jail.local
+[sshd]
+enabled = true
+port = ssh
+logpath = /var/log/auth.log
+maxretry = 4
+bantime = 1h
+findtime = 10m
+EOF
+systemctl restart fail2ban && echo "[+] Fail2ban installed and restarted" | tee -a "$LOG_FILE"
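+
+# Quick verification (fail2ban-client ships with the fail2ban package):
+#   fail2ban-client status sshd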
+
+# 5. Configure UFW
+ufw allow ssh
+# Example: allow specific ports for running screen tools
+# Adjust these as needed for your app ports
+ufw allow 5010/tcp # toot
+ufw allow 5011/tcp # toot2
+ufw allow 8020/tcp # archive list
+ufw allow 8021/tcp # archive console
+ufw allow 5000/tcp #phone
+ufw default deny incoming
+ufw default allow outgoing
+ufw enable
+
+echo "[+] UFW firewall rules applied" | tee -a "$LOG_FILE"
+
+# Done
+echo "βœ… pyapps hardening complete. Review log: $LOG_FILE"
+exit 0
diff --git a/honeypot_checker.sh b/honeypot_checker.sh
new file mode 100755
index 0000000..18033f4
--- /dev/null
+++ b/honeypot_checker.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+# Honeypot Self-Test Script for FailZero
+# Run this from Krang or any box with access to the FailZero honeypot.
+
+TARGET="$1"
+PORT=22
+USERNAME="admin"
+TESTFILE="/opt/genesis/krang_config.yaml"
+
+if [[ -z "$TARGET" ]]; then
+ echo "Usage: $0 <failzero_ip_or_hostname>"
+ exit 1
+fi
+
+echo "πŸ•΅οΈ Starting honeypot self-test against $TARGET"
+
+echo -e "\n[1/5] Scanning TCP port 22..."
+nmap -p $PORT "$TARGET" | grep "$PORT"
+
+echo -e "\n[2/5] Attempting SSH login to Cowrie..."
+# This will hang briefly, then fail β€” Cowrie captures it
+timeout 5s ssh -o StrictHostKeyChecking=no -p $PORT "$USERNAME@$TARGET" "echo test"
+
+echo -e "\n[3/5] Running fake commands to trigger logs..."
+timeout 5s ssh -o StrictHostKeyChecking=no -p $PORT "$USERNAME@$TARGET" "ls /; cat $TESTFILE; exit"
+
+echo -e "\n[4/5] Re-checking open port..."
+nmap -p $PORT "$TARGET" | grep "$PORT"
+
+echo -e "\n[5/5] Checking for log entries (if local)..."
+if [[ -f /home/cowrie/cowrie/var/log/cowrie/cowrie.log ]]; then
+ echo "β†’ Tail of Cowrie log:"
+ tail -n 5 /home/cowrie/cowrie/var/log/cowrie/cowrie.log
+else
+ echo "βœ“ If running remotely, check FailZero: /home/cowrie/cowrie/var/log/cowrie/cowrie.log"
+fi
+
+echo -e "\nβœ… Honeypot self-test complete.
+ - Cowrie should have captured a login + command attempt
+ - Check Telegram for alerts if enabled
+ - Check logs on FailZero for full details"
diff --git a/honeypot_selftest_pull.sh b/honeypot_selftest_pull.sh
new file mode 100755
index 0000000..d5a453c
--- /dev/null
+++ b/honeypot_selftest_pull.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# Honeypot Self-Test Script for FailZero from Krang
+# Performs bait interaction + pulls Cowrie logs from FailZero for analysis
+
+TARGET="$1"
+SSH_USER="doc" # The remote user on FailZero (must be able to sudo or access Cowrie logs)
+REMOTE_LOG="/home/cowrie/cowrie/var/log/cowrie/cowrie.log"
+LOCAL_DIR="/root/honeypot_logs"
+LOCAL_LOG="$LOCAL_DIR/$(date +%Y-%m-%d_%H-%M-%S)_cowrie.log"
+PORT=22
+USERNAME="root"
+TESTFILE="/opt/genesis/krang_config.yaml"
+
+if [[ -z "$TARGET" ]]; then
+ echo "Usage: $0 <failzero_ip_or_hostname>"
+ exit 1
+fi
+
+mkdir -p "$LOCAL_DIR"
+
+echo "πŸ•΅οΈ Starting honeypot self-test against $TARGET"
+
+echo -e "\n[1/6] Scanning TCP port 22..."
+nmap -p $PORT "$TARGET" | grep "$PORT"
+
+echo -e "\n[2/6] Attempting SSH login to Cowrie..."
+timeout 5s ssh -o StrictHostKeyChecking=no -p $PORT "$USERNAME@$TARGET" "echo test" || echo "(expected fake shell or timeout)"
+
+echo -e "\n[3/6] Running fake commands to trigger logs..."
+timeout 5s ssh -o StrictHostKeyChecking=no -p $PORT "$USERNAME@$TARGET" "ls /; cat $TESTFILE; exit" || echo "(command simulation complete)"
+
+echo -e "\n[4/6] Pulling Cowrie logs back to Krang..."
+scp "$SSH_USER@$TARGET:$REMOTE_LOG" "$LOCAL_LOG" >/dev/null 2>&1
+
+if [[ $? -eq 0 ]]; then
+ echo "βœ… Pulled Cowrie log to $LOCAL_LOG"
+else
+ echo "❌ Failed to retrieve Cowrie log. Check SSH user or path."
+fi
+
+echo -e "\n[5/6] Preview of last 5 log entries:"
+tail -n 5 "$LOCAL_LOG" 2>/dev/null || echo "(log file not found or unreadable)"
+
+echo -e "\n[6/6] Final port check:"
+nmap -p $PORT "$TARGET" | grep "$PORT"
+
+echo -e "\n🏁 Honeypot self-test complete."
diff --git a/kodakmoment.sh b/kodakmoment.sh
new file mode 100755
index 0000000..8176e3a
--- /dev/null
+++ b/kodakmoment.sh
@@ -0,0 +1,89 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+### CONFIG ###
+SOURCE_DIR="/home/doc/genesis-tools"
+DEST_HOST="root@backup.sshjunkie.com"
+DEST_PATH="/mnt/backup/images/genesis-tools"
+REMOTE_LATEST_LINK="$DEST_PATH/latest"
+RETENTION_DAYS=7
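+
+# Usage (timestamps illustrative):
+#   ./kodakmoment.sh                  # take a snapshot
+#   ./kodakmoment.sh --dry-run        # preview what rsync would do
+#   ./kodakmoment.sh --list           # list snapshots on the backup host
+#   ./kodakmoment.sh --restore 2025-01-01_03-00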
+
+# Timestamp-based vars (only when running a snapshot)
+TIMESTAMP=$(date +%F_%H-%M)
+SNAPSHOT_NAME="$TIMESTAMP"
+REMOTE_SNAP_DIR="$DEST_PATH/$SNAPSHOT_NAME"
+
+# --dry-run support
+DRY_RUN=""
+if [[ "${1:-}" == "--dry-run" ]]; then
+ echo "πŸ§ͺ Running in dry-run mode..."
+ DRY_RUN="--dry-run"
+fi
+
+# --list support
+if [[ "${1:-}" == "--list" ]]; then
+ echo "πŸ“‚ Available snapshots on $DEST_HOST:"
+ ssh "$DEST_HOST" "ls -1 $DEST_PATH | sort"
+ exit 0
+fi
+
+# --restore <timestamp> support
+if [[ "${1:-}" == "--restore" && -n "${2:-}" ]]; then
+ RESTORE_TIMESTAMP="$2"
+ RESTORE_REMOTE_PATH="$DEST_PATH/$RESTORE_TIMESTAMP"
+
+ echo "🧾 Restoring snapshot $RESTORE_TIMESTAMP from $DEST_HOST..."
+ ssh "$DEST_HOST" "[ -d '$RESTORE_REMOTE_PATH' ]" || {
+ echo "❌ Snapshot $RESTORE_TIMESTAMP does not exist."
+ exit 1
+ }
+
+ echo "⚠️ This will overwrite files in $SOURCE_DIR with those from snapshot."
+ read -rp "Continue? (y/n): " confirm
+ if [[ "$confirm" != "y" ]]; then
+ echo "❌ Restore cancelled."
+ exit 1
+ fi
+
+ rsync -a --delete -e ssh "$DEST_HOST:$RESTORE_REMOTE_PATH/" "$SOURCE_DIR/"
+ echo "βœ… Restore from $RESTORE_TIMESTAMP complete."
+ exit 0
+fi
+
+# Regular snapshot mode starts here
+# Verify source directory exists
+if [[ ! -d "$SOURCE_DIR" ]]; then
+ echo "❌ ERROR: Source directory $SOURCE_DIR does not exist."
+ exit 1
+fi
+
+# Make sure destination path exists on the remote
+echo "πŸ“‚ Ensuring remote path exists..."
+ssh "$DEST_HOST" "mkdir -p '$DEST_PATH'"
+
+# Determine whether to use --link-dest based on presence of 'latest'
+REMOTE_LD_OPTION=""
+if ssh "$DEST_HOST" "[ -e '$REMOTE_LATEST_LINK' ]"; then
+ REMOTE_LD_OPTION="--link-dest=$REMOTE_LATEST_LINK"
+else
+ echo "ℹ️ No 'latest' snapshot found β€” creating full backup."
+fi
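+
+# With --link-dest, files unchanged since 'latest' are hard-linked into the new
+# snapshot instead of re-copied, so each snapshot looks full while only deltas use space.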
+
+# Create snapshot via rsync with optional deduplication
+echo "πŸ“Έ Creating snapshot: $REMOTE_SNAP_DIR"
+rsync -a --delete \
+ --exclude="miscellaneous/kodakmoment.sh" \
+ $DRY_RUN \
+ $REMOTE_LD_OPTION \
+ -e ssh "$SOURCE_DIR/" "$DEST_HOST:$REMOTE_SNAP_DIR"
+
+# Only perform post-processing if not a dry-run
+if [[ -z "$DRY_RUN" ]]; then
+    echo "πŸ”— Updating 'latest' symlink..."
+    ssh "$DEST_HOST" "rm -f '$REMOTE_LATEST_LINK'; ln -s '$REMOTE_SNAP_DIR' '$REMOTE_LATEST_LINK'"
+
+    echo "🧹 Pruning snapshots older than $RETENTION_DAYS days..."
+    # -mindepth 1 keeps find from ever matching (and deleting) $DEST_PATH itself
+    ssh "$DEST_HOST" "find '$DEST_PATH' -mindepth 1 -maxdepth 1 -type d -mtime +$RETENTION_DAYS -exec rm -rf {} +"
+fi
+
+echo "βœ… KodakMoment complete."
diff --git a/kodakmomentproxmox.sh b/kodakmomentproxmox.sh
new file mode 100755
index 0000000..4acbbb2
--- /dev/null
+++ b/kodakmomentproxmox.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+### CONFIG ###
+PROXMOX_HOST="root@38.102.127.162"
+VMIDS=(101 103 104 105 106 108)
+DEST_HOST="root@thevault.bounceme.net"
+DEST_PATH="/mnt/backup3/vzdump"
+TIMESTAMP=$(date +%F_%H-%M)
+RETENTION_DAYS=7
+
+echo "πŸ“¦ Starting selective VM backup via KodakMoment..."
+
+# Ensure base destination directory exists
+echo "πŸ“ Ensuring remote backup directory exists..."
+ssh "$DEST_HOST" "mkdir -p '$DEST_PATH'"
+
+for VMID in "${VMIDS[@]}"; do
+ if [[ "$VMID" == "101" ]]; then
+ echo "🎢 VM 101 is a music VM β€” using rsync instead of vzdump..."
+ ssh doc@portal.genesishostingtechnologies.com \
+ "rsync -avh /var/lib/docker/volumes/azuracast_station_data/_data/ $DEST_HOST:/mnt/backup3/azuracast/"
+ echo "βœ… Music files from VM 101 synced to thevault."
+ else
+ REMOTE_FILE="$DEST_PATH/vzdump-qemu-${VMID}-$TIMESTAMP.vma.zst"
+ echo "🧠 Streaming snapshot backup of VM $VMID to $REMOTE_FILE..."
+
+ ssh "$PROXMOX_HOST" \
+ "vzdump $VMID --mode snapshot --compress zstd --stdout --storage local-lvm" | \
+ ssh "$DEST_HOST" \
+ "cat > '$REMOTE_FILE'"
+
+ echo "βœ… VM $VMID streamed and saved to thevault."
+ fi
+done
+
+echo "🧹 Pruning old vzdump backups on thevault..."
+ssh "$DEST_HOST" "find '$DEST_PATH' -type f -mtime +$RETENTION_DAYS -name 'vzdump-qemu-*.zst' -delete"
+
+echo "βœ… KodakMoment complete β€” selective backups successful."
diff --git a/krang_backup.sh b/krang_backup.sh
new file mode 100755
index 0000000..45d1a34
--- /dev/null
+++ b/krang_backup.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+STAMP=$(date +%Y%m%d-%H%M%S)
+VAULT_HOST="root@thevault.sshjunkie.com"
+TG_BOT_TOKEN="8178867489:AAH0VjN7VnZSCIWasSz_y97iBLLjPJA751k"
+TG_CHAT_ID="1559582356"
+TG_API="https://api.telegram.org/bot$TG_BOT_TOKEN/sendMessage"
+
+# Source directories to back up
+SOURCE_DIRS=(
+ "/home/doc/genesis-tools/"
+
+)
+
+# Destination directories on the vault
+DEST_DIRS=(
+ "/nexus/krang_assets"
+
+)
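+
+# Note: $STAMP is appended directly to the destination path (no slash), so each
+# run lands in a sibling directory like /nexus/krang_assets20250101-030000/ (illustrative).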
+
+# Rsync commands to back up directories
+for i in "${!SOURCE_DIRS[@]}"; do
+ # Rsync to the vault (using SSH)
+ rsync -avz --delete "${SOURCE_DIRS[$i]}" "$VAULT_HOST:${DEST_DIRS[$i]}$STAMP/"
+
+ # Check if the rsync was successful and send a Telegram message
+ if [ $? -eq 0 ]; then
+ curl -s -X POST "$TG_API" -d chat_id="$TG_CHAT_ID" -d text="πŸ“¦ Krang backup complete for ${SOURCE_DIRS[$i]} β†’ ${DEST_DIRS[$i]}$STAMP"
+ else
+ curl -s -X POST "$TG_API" -d chat_id="$TG_CHAT_ID" -d text="⚠️ Krang backup failed for ${SOURCE_DIRS[$i]} β†’ ${DEST_DIRS[$i]}$STAMP"
+ fi
+done
diff --git a/krang_modular_health.sh b/krang_modular_health.sh
new file mode 100755
index 0000000..7b781e3
--- /dev/null
+++ b/krang_modular_health.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+# === CONFIG ===
+REMOTE_USER="doc"
+BOT_TOKEN="8178867489:AAH0VjN7VnZSCIWasSz_y97iBLLjPJA751k"
+CHAT_ID="1559582356"
+TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
+LOGFILE="$HOME/krang-logs/health-$(date '+%Y%m%d-%H%M').log"
+
+# Thresholds
+SWAP_LIMIT_MB=512
+LOAD_LIMIT=4.0
+
+mkdir -p "$HOME/krang-logs"
+
+SERVERS=(
+ thevault.sshjunkie.com
+ zcluster.technodrome1.sshjunkie.com
+ zcluster.technodrome2.sshjunkie.com
+ shredder.sshjunkie.com
+ chatwithus.live
+)
+
+SUMMARY="πŸ“‘ Krang System Health Report - $TIMESTAMP
+
+"
+
+for HOST in "${SERVERS[@]}"; do
+ echo "πŸ” Collecting from $HOST..."
+
+ DATA=$(ssh "$REMOTE_USER@$HOST" bash -s << 'EOF'
+HOST=$(hostname)
+MEM=$(awk '/MemAvailable/ {printf "%.1f Gi free", $2 / 1024 / 1024}' /proc/meminfo)
+SWAP_RAW=$(free -m | awk '/Swap:/ {print $3}')
+SWAP="$SWAP_RAW Mi used"
+DISK=$(df -h / | awk 'NR==2 {print $4 " free"}')
+LOAD=$(uptime | awk -F'load average:' '{print $2}' | cut -d, -f1 | xargs)
+UPTIME=$(uptime -p)
+
+# Optional service checks
+NGINX=$(systemctl is-active nginx 2>/dev/null)
+DOCKER=$(systemctl is-active docker 2>/dev/null)
+PGSQL=$(systemctl is-active postgresql 2>/dev/null || systemctl is-active postgresql@14-main 2>/dev/null)
+
+echo "$HOST|$MEM|$SWAP_RAW|$SWAP|$DISK|$LOAD|$UPTIME|$NGINX|$DOCKER|$PGSQL"
+EOF
+)
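+
+    # One pipe-delimited record per host, e.g. (illustrative values):
+    #   thevault|12.3 Gi free|128|128 Mi used|410G free|0.42|up 3 days, 2 hours|active|active|active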
+
+ IFS='|' read -r H MEM SWAP_MB SWAP_HUMAN DISK LOAD1 UPTIME_STATUS NGINX_STATUS DOCKER_STATUS PGSQL_STATUS <<< "$DATA"
+
+ ALERTS=""
+ if (( SWAP_MB > SWAP_LIMIT_MB )); then
+ ALERTS+="⚠️ HIGH SWAP ($SWAP_HUMAN)
+"
+ fi
+
+ LOAD_INT=$(awk "BEGIN {print ($LOAD1 > $LOAD_LIMIT) ? 1 : 0}")
+ if [ "$LOAD_INT" -eq 1 ]; then
+ ALERTS+="⚠️ HIGH LOAD ($LOAD1)
+"
+ fi
+
+ [ "$NGINX_STATUS" != "active" ] && ALERTS+="❌ NGINX not running
+"
+ [ "$DOCKER_STATUS" != "active" ] && ALERTS+="❌ Docker not running
+"
+ [ "$PGSQL_STATUS" != "active" ] && ALERTS+="❌ PostgreSQL not running
+"
+
+ ALERTS_MSG=""
+ [ -n "$ALERTS" ] && ALERTS_MSG="🚨 ALERTS:
+$ALERTS"
+
+ SUMMARY+="πŸ–₯️ $H
+β€’ Mem: $MEM
+β€’ Swap: $SWAP_HUMAN
+β€’ Disk: $DISK
+β€’ Load: $LOAD1
+β€’ Uptime: $UPTIME_STATUS
+$ALERTS_MSG
+"
+done
+
+# === KRANG CLOCK ACCURACY CHECK ===
+NTP_RESULT=$(ntpdate -q time.google.com 2>&1)
+OFFSET=$(echo "$NTP_RESULT" | awk '/offset/ {print $10}')
+OFFSET_MS=$(awk -v off="$OFFSET" 'BEGIN {if (off != "") printf "%.0f", off * 1000}')
+
+if [[ -n "$OFFSET_MS" ]]; then
+ if (( OFFSET_MS > 500 || OFFSET_MS < -500 )); then
+ # Auto-correct the system clock
+ CORRECTION=$(ntpdate -u time.google.com 2>&1)
+ SUMMARY+="πŸ› οΈ Auto-corrected Krang clock via ntpdate: $CORRECTION
+"
+ SUMMARY+="πŸ•°οΈ Krang Clock Offset: ${OFFSET_MS}ms β€” ⚠️ OUT OF SYNC
+"
+ else
+ SUMMARY+="πŸ•°οΈ Krang Clock Offset: ${OFFSET_MS}ms β€” βœ… SYNCHRONIZED
+"
+ fi
+else
+ SUMMARY+="πŸ•°οΈ Krang Clock Check: ❌ FAILED to retrieve offset.
+"
+fi
+
+# Log to file
+echo -e "$SUMMARY" > "$LOGFILE"
+
+# Send to Telegram
+curl -s -X POST https://api.telegram.org/bot$BOT_TOKEN/sendMessage \
+ -d chat_id="$CHAT_ID" \
+ -d text="$SUMMARY"
diff --git a/linux_masto.sh b/linux_masto.sh
new file mode 100755
index 0000000..3e8c92f
--- /dev/null
+++ b/linux_masto.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Posts a Linux tip via Mastodon API, with fallback and logging
+
+INSTANCE="https://chatwithus.live" # <-- Change this!
+TOKEN_FILE="/home/doc/genesis-tools/miscellaneous/bash/bin/mastodon_token.secret"
+TIP_API="https://linuxtldr.com/api/tips/random"
+FALLBACK_FILE="/home/doc/linux_tips.txt"
+LOG_FILE="/home/doc/linux_masto_post.log"
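+
+# Example (assumed) daily cron entry:
+#   0 14 * * * /home/doc/genesis-tools/miscellaneous/bash/bin/linux_masto.sh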
+
+# Load access token
+if [[ ! -f "$TOKEN_FILE" ]]; then
+ echo "Missing access token at $TOKEN_FILE"
+ exit 1
+fi
+ACCESS_TOKEN=$(<"$TOKEN_FILE")
+
+# Function to log and exit
+fail_and_log() {
+ echo "$(date): ERROR - $1" >> "$LOG_FILE"
+ echo "β›” $1"
+ exit 1
+}
+
+# Try fetching a tip from API
+response=$(curl -s "$TIP_API")
+
+if echo "$response" | jq . >/dev/null 2>&1; then
+    title=$(echo "$response" | jq -r '.title')
+    tip=$(echo "$response" | jq -r '.tip')
+    url=$(echo "$response" | jq -r '.url')
+    # printf renders real newlines (a plain double-quoted "\n" would post a literal backslash-n)
+    POST=$(printf 'πŸ“˜ *Linux Tip of the Day*\n\n%s\n\n%s\n\nπŸ”— More: %s\n\n#Linux #CommandLine #SysAdmin' "$title" "$tip" "$url")
+else
+    # API failed, use fallback
+    if [[ ! -f "$FALLBACK_FILE" ]]; then
+        fail_and_log "Both API and fallback file failed. No tips to post."
+    fi
+    POST=$(printf 'πŸ“˜ *Linux Tip of the Day*\n\n%s\n\n#Linux #CommandLine #SysAdmin' "$(shuf -n 1 "$FALLBACK_FILE")")
+    echo "$(date): Used fallback tip." >> "$LOG_FILE"
+fi
+
+# Post to Mastodon
+# --data-urlencode keeps the multi-line status text intact over the wire
+resp=$(curl -s -X POST "$INSTANCE/api/v1/statuses" \
+    -H "Authorization: Bearer $ACCESS_TOKEN" \
+    --data-urlencode "status=$POST" \
+    -d "visibility=public")
+
+# Check response
+if echo "$resp" | grep -q '"id":'; then
+ echo "$(date): βœ… Posted successfully: $(echo "$POST" | head -n 1)" >> "$LOG_FILE"
+else
+ fail_and_log "Post failed. Response: $resp"
+fi
diff --git a/malips.sh b/malips.sh
new file mode 100755
index 0000000..4929e68
--- /dev/null
+++ b/malips.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# Path to Snort's alert log (snort.alert.fast)
+SNORT_LOG="/var/log/snort/snort.alert.fast"
+
+# Database connection details
+DB_HOST="zcluster.technodrome1.sshjunkie.com"
+DB_USER="ipblocks_user"
+DB_PASS="rusty2281"
+DB_NAME="ipblocks"
+
+# Function to insert blocked IP into the PostgreSQL database
+block_ip() {
+ local ip=$1
+
+ # Remove port if included in the IP
+ ip=${ip%%:*}
+
+ # Insert the blocked IP into the PostgreSQL database (into the blocked_ip_log table)
+ PGPASSWORD="$DB_PASS" psql -U "$DB_USER" -h "$DB_HOST" -d "$DB_NAME" -c "INSERT INTO blocked_ip_log (ip_address) VALUES ('$ip');"
+
+ # Optionally print to confirm the insertion
+ echo "Blocked IP $ip inserted into the database log."
+}
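+
+# Note: $ip is interpolated straight into the SQL string. A safer variant
+# (assuming the same table) passes it as a psql variable instead:
+#   echo "INSERT INTO blocked_ip_log (ip_address) VALUES (:'ip');" \
+#     | PGPASSWORD="$DB_PASS" psql -U "$DB_USER" -h "$DB_HOST" -d "$DB_NAME" -v ip="$ip"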
+
+# Ensure the log file exists and is readable
+if [ ! -f "$SNORT_LOG" ]; then
+ echo "Snort log file not found!"
+ exit 1
+fi
+
+# Monitor the snort.alert.fast file for new malicious IPs
+tail -F "$SNORT_LOG" | while read line; do
+ # Debug: Output the full line from Snort log
+ echo "Processing: $line"
+
+ # Extract source and destination IP addresses from Snort logs
+ if echo "$line" | grep -q "ICMP PING NMAP"; then
+ # Extract source IP (before "->")
+ ip=$(echo "$line" | awk -F' -> ' '{print $1}' | awk '{print $NF}' | cut -d':' -f1)
+ echo "Found Source IP: $ip" # Debug: Show the IP being extracted
+ block_ip "$ip"
+ elif echo "$line" | grep -q "EXPLOIT"; then
+ # Extract source IP (before "->")
+ ip=$(echo "$line" | awk -F' -> ' '{print $1}' | awk '{print $NF}' | cut -d':' -f1)
+ echo "Found Source IP: $ip" # Debug: Show the IP being extracted
+ block_ip "$ip"
+ fi
+done
diff --git a/mastodon_restart.sh b/mastodon_restart.sh
new file mode 100755
index 0000000..0f407d8
--- /dev/null
+++ b/mastodon_restart.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+# === CONFIG ===
+SERVER="root@chatwithus.live"
+MASTODON_INSTANCE="https://chatwithus.live"
+ACCESS_TOKEN="07w3Emdw-cv_TncysrNU8Ed_sHJhwtnvKmnLqKlHmKA"
+
+TOOT_VISIBILITY="public"
+
+WARNING_TOOT_2M="🚨 Heads up! We’ll be restarting ChatWithUs.Live in about 2 minutes to perform routine maintenance and keep things running smoothly. Please wrap up anything important and hang tight β€” we’ll be right back."
+WARNING_TOOT_1M="⚠️ Just one more minute until we restart the server. If you’re in the middle of something, now’s the time to save and log out. Thanks for your patience while we keep the gears turning!"
+
+FINAL_TOOT="βœ… ChatWithUs.Live services restarted from Krang via OPS script."
+
+TELEGRAM_BOT_TOKEN="8178867489:AAH0VjN7VnZSCIWasSz_y97iBLLjPJA751k"
+TELEGRAM_CHAT_ID="1559582356"
+TELEGRAM_TEXT="βœ… Mastodon has been restarted by Krang. All services are back online."
+
+LOG_FILE="/home/doc/genesis-tools/masto_restart.log"
+TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
+
+{
+echo "[$TIMESTAMP] === Mastodon Restart Initiated ==="
+
+# === Post 2-Minute Warning Toot ===
+echo "[*] Posting 2-minute warning to Mastodon..."
+curl -s -X POST "$MASTODON_INSTANCE/api/v1/statuses" \
+ -H "Authorization: Bearer $ACCESS_TOKEN" \
+ -d "status=$WARNING_TOOT_2M" \
+ -d "visibility=$TOOT_VISIBILITY" > /dev/null && echo "[βœ“] 2-minute warning posted."
+
+# === Wait 1 minute ===
+sleep 60
+
+# === Post 1-Minute Warning Toot ===
+echo "[*] Posting 1-minute warning to Mastodon..."
+curl -s -X POST "$MASTODON_INSTANCE/api/v1/statuses" \
+ -H "Authorization: Bearer $ACCESS_TOKEN" \
+ -d "status=$WARNING_TOOT_1M" \
+ -d "visibility=$TOOT_VISIBILITY" > /dev/null && echo "[βœ“] 1-minute warning posted."
+
+# === Wait 1 more minute ===
+sleep 60
+
+# === Restart Mastodon Services ===
+echo "[*] Connecting to $SERVER to restart Mastodon services..."
+
+ssh "$SERVER" bash << 'EOF'
+echo "Restarting mastodon-web..."
+systemctl restart mastodon-web
+
+echo "Restarting mastodon-sidekiq..."
+systemctl restart mastodon-sidekiq
+
+echo "Restarting mastodon-streaming..."
+systemctl restart mastodon-streaming
+
+echo "All services restarted."
+EOF
+
+# === Wait Until Mastodon API is Responsive ===
+echo "[*] Waiting for Mastodon to come back online..."
+until curl -sf "$MASTODON_INSTANCE/api/v1/instance" > /dev/null; do
+ echo " ... still starting up, retrying in 5s"
+ sleep 5
+done
+
+echo "[+] Mastodon is back online."
+
+# === Post Final Toot ===
+echo "[*] Posting final status to Mastodon..."
+curl -s -X POST "$MASTODON_INSTANCE/api/v1/statuses" \
+ -H "Authorization: Bearer $ACCESS_TOKEN" \
+ -d "status=$FINAL_TOOT" \
+ -d "visibility=$TOOT_VISIBILITY" > /dev/null && echo "[βœ“] Final status posted."
+
+# === Telegram Notification ===
+echo "[*] Sending Telegram alert..."
+curl -s -X POST "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/sendMessage" \
+ -d chat_id="$TELEGRAM_CHAT_ID" \
+ -d text="$TELEGRAM_TEXT" > /dev/null && echo "[βœ“] Telegram alert sent."
+
+echo "[βœ“] All tasks complete. Logged out of $SERVER."
+echo "[$TIMESTAMP] === Mastodon Restart Complete ==="
+echo ""
+
+} >> "$LOG_FILE" 2>&1
diff --git a/mastodon_status-check.sh b/mastodon_status-check.sh
new file mode 100755
index 0000000..046ab83
--- /dev/null
+++ b/mastodon_status-check.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+echo "Step 0: Starting script..."
+
+# Load token from ~/.mastodon-token or environment
+TOKEN_FILE="$HOME/.mastodon-token"
+if [ -f "$TOKEN_FILE" ]; then
+ export MASTO_TOKEN=$(cat "$TOKEN_FILE")
+fi
+
+if [ -z "$MASTO_TOKEN" ]; then
+ echo "❌ No Mastodon access token found. Set \$MASTO_TOKEN or create ~/.mastodon-token"
+ exit 1
+fi
+
+echo "Step 1: Token loaded."
+
+TMPFILE=$(mktemp)
+MASTO_API="https://chatwithus.live/api/v1/statuses"
+
+SERVICES=(
+ "Genesis Radio|https://genesis-radio.net"
+ "Mastodon|https://chatwithus.live"
+ "MinIO|https://console.sshjunkie.com"
+ "AzuraCast|portal.genesishostingtechnologies.com/login"
+ "TeamTalk|tcp://tt.themediahub.org:10442"
+ "DirectAdmin|https://da.genesishostingtechnologies.com"
+)
+
+echo "[Status Check @ $(date -u '+%H:%M %Z')]" > "$TMPFILE"
+
+for service in "${SERVICES[@]}"; do
+ IFS="|" read -r NAME URL <<< "$service"
+
+ if [[ $URL == tcp://* ]]; then
+ # Handle TCP port check
+ HOSTPORT=${URL#tcp://}
+ HOST=${HOSTPORT%%:*}
+ PORT=${HOSTPORT##*:}
+ echo "Checking TCP: $NAME on $HOST:$PORT"
+ timeout 5 bash -c "</dev/tcp/$HOST/$PORT" &>/dev/null
+ else
+ # Handle HTTP(S) check
+ echo "Checking HTTP: $NAME -> $URL"
+ curl -s --head --fail --max-time 5 "$URL" >/dev/null
+ fi
+
+ if [ $? -eq 0 ]; then
+ echo "βœ… $NAME: Online" >> "$TMPFILE"
+ else
+ echo "❌ $NAME: Offline" >> "$TMPFILE"
+ fi
+done
+
+echo "Step 2: Results collected."
+cat "$TMPFILE"
+
+# Convert newlines to URL-encoded format
+POST_BODY=$(sed ':a;N;$!ba;s/\n/%0A/g' "$TMPFILE")
+
+echo "Step 3: Posting to Mastodon..."
+
+curl -s -X POST "$MASTO_API" \
+ -H "Authorization: Bearer $MASTO_TOKEN" \
+ -d "status=$POST_BODY"
+
+echo "Step 4: Done."
+
+rm -f "$TMPFILE"
diff --git a/mastodon_token.secret b/mastodon_token.secret
new file mode 100755
index 0000000..1fce226
--- /dev/null
+++ b/mastodon_token.secret
@@ -0,0 +1,3 @@
+# Replace with your actual token
+echo "1mHEd07LOJ3igmXXy6zhR18ADhQDOs1wT7YXklejh8E" > /home/doc/genesis-tools/miscellaneous/bash/bin/mastodon_token.secret
+chmod 600 /home/doc/genesis-tools/miscellaneous/bash/bin/mastodon_token.secret
diff --git a/migrationtoblock.sh b/migrationtoblock.sh
new file mode 100755
index 0000000..3cc75fe
--- /dev/null
+++ b/migrationtoblock.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+# === CONFIG ===
+SRC="/mnt/raid5/minio-data/linodeassets"
+DST="/mnt/mastodon-assets"
+MOUNTPOINT="/home/mastodon/live/public/system"
+LOGFILE="/var/log/mastodon_asset_migration_$(date +%Y%m%d_%H%M%S).log"
+
+log() {
+ echo "[$(date '+%F %T')] $*" | tee -a "$LOGFILE"
+}
+
+verify_sync() {
+ local src_count=$(find "$SRC" -type f | wc -l)
+ local dst_count=$(find "$DST" -type f | wc -l)
+ local src_bytes=$(du -sb "$SRC" | awk '{print $1}')
+ local dst_bytes=$(du -sb "$DST" | awk '{print $1}')
+
+ echo "--- Verification Results ---" | tee -a "$LOGFILE"
+ echo "Files: $src_count β†’ $dst_count" | tee -a "$LOGFILE"
+ echo "Bytes: $src_bytes β†’ $dst_bytes" | tee -a "$LOGFILE"
+
+ if [[ "$src_count" -ne "$dst_count" || "$src_bytes" -ne "$dst_bytes" ]]; then
+ echo "❌ MISMATCH detected. Please review the rsync log." | tee -a "$LOGFILE"
+ else
+ echo "βœ… Verified: source and destination match." | tee -a "$LOGFILE"
+ fi
+ echo "---------------------------" | tee -a "$LOGFILE"
+}
+
+# === PHASE 1: Live Sync ===
+log "πŸš€ Starting Phase 1: Live rsync"
+rsync -aAXv --progress "$SRC/" "$DST/" | tee -a "$LOGFILE"
+
+# === Stop Mastodon ===
+log "πŸ›‘ Stopping Mastodon services..."
+systemctl stop mastodon-web mastodon-sidekiq mastodon-streaming || {
+ log "❌ Failed to stop Mastodon services"; exit 1;
+}
+
+# === PHASE 2: Final Sync ===
+log "πŸ” Starting Phase 2: Final rsync with --delete"
+rsync -aAXv --delete "$SRC/" "$DST/" | tee -a "$LOGFILE"
+
+# === Bind Mount Cutover ===
+log "πŸ”— Swapping in block storage as $MOUNTPOINT"
+if [[ -d "$MOUNTPOINT" ]]; then
+ mv "$MOUNTPOINT" "${MOUNTPOINT}.bak" || {
+ log "❌ Could not move existing mountpoint"; exit 1;
+ }
+fi
+
+mkdir -p "$MOUNTPOINT"
+mount --bind "$DST" "$MOUNTPOINT"
+grep -q "$MOUNTPOINT" /etc/fstab || echo "$DST $MOUNTPOINT none bind 0 0" >> /etc/fstab
+log "[βœ“] Bind mount active and persisted"
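+
+# Rollback sketch (assumption, not part of the cutover): umount "$MOUNTPOINT",
+# move "${MOUNTPOINT}.bak" back into place, then restart the Mastodon services.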
+
+# === Permissions ===
+log "πŸ”§ Fixing permissions on $DST"
+chown -R mastodon:mastodon "$DST"
+
+# === Restart Mastodon ===
+log "πŸš€ Restarting Mastodon services..."
+systemctl start mastodon-web mastodon-sidekiq mastodon-streaming || {
+ log "❌ Failed to restart Mastodon services"; exit 1;
+}
+
+# === VERIFY ===
+log "πŸ§ͺ Verifying file count and byte totals"
+verify_sync
+
+log "πŸŽ‰ Migration completed successfully. Mastodon is live on block storage."
diff --git a/p1.sh b/p1.sh
new file mode 100755
index 0000000..0213a11
--- /dev/null
+++ b/p1.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+# Configuration
+SRC_DIR="/home/mastodon/live"
+DEST_DIR="/home/mastodon/backup"
+PG_DB_NAME="mastodon_production"
+PG_USER="mastodon"
+PG_HOST="38.102.127.174" # Use database IP
+PG_PORT="5432"
+TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
+BACKUP_DIR="${DEST_DIR}/mastodon_backup" # Removed the timestamp here for simplicity
+LOG_FILE="$(pwd)/migration_checklist_${TIMESTAMP}.log" # Create log file in the same directory
+REMOTE_USER="root"
+REMOTE_HOST="38.102.127.167" # New server IP
+REMOTE_DIR="/home/mastodon"
+
+# Initialize the log file
+echo "Migration checklist for real run on $(date)" > $LOG_FILE
+echo "========================================" >> $LOG_FILE
+
+# Step 1: Ensure necessary directories exist on the new server
+echo "Checking if 'mastodon' user exists..." >> $LOG_FILE
+id -u mastodon &>/dev/null || useradd -m mastodon
+
+echo "Ensuring backup and log directories exist..." >> $LOG_FILE
+mkdir -p /home/mastodon/mastodon_backup
+mkdir -p /home/mastodon/logs
+
+echo "Ensuring backup staging directory exists locally..." >> $LOG_FILE
+mkdir -p "$DEST_DIR/mastodon_backup"
+
+# Step 2: Check if the database is reachable
+echo "Checking if the database is reachable..." >> $LOG_FILE
+psql -U $PG_USER -h $PG_HOST -d $PG_DB_NAME -c 'SELECT 1;' || { echo "Database connection failed" >> $LOG_FILE; exit 1; }
+
+# Step 3: Check if S3 storage is reachable
+echo "Checking if S3 storage is reachable..." >> $LOG_FILE
+curl --silent --head --fail 'https://chatwithus-live.us-east-1.linodeobjects.com' || echo 'S3 storage is not reachable' >> $LOG_FILE
+
+# Step 4: Transfer files and directories
+echo "Starting backup transfer..." >> $LOG_FILE
+
+# Ensure the destination directory exists
+mkdir -p $BACKUP_DIR
+
+# Transfer Mastodon files from old server
+rsync -avz --delete $SRC_DIR $BACKUP_DIR/mastodon_files # The '-z' flag compresses the data during transfer
+
+# Transfer Nginx config
+rsync -avz /etc/nginx $BACKUP_DIR/nginx_configs # Added compression for Nginx config transfer
+
+# Backup PostgreSQL database
+echo "Backing up PostgreSQL database..." >> $LOG_FILE
+pg_dump -U $PG_USER -d $PG_DB_NAME > "$DEST_DIR/mastodon_db.sql"
+
+# Ensure the backup directory is created (to be safe)
+mkdir -p "$DEST_DIR/mastodon_backup"
+
+# Compress the backup directory with tar (to reduce size)
+echo "Creating backup archive..." >> $LOG_FILE
+tar -czf "$DEST_DIR/mastodon_backup.tar.gz" -C "$DEST_DIR" mastodon_backup # Compress the backup directory
+
+# Step 5: Transfer backup to new server
+echo "Transferring backup to new server..." >> $LOG_FILE
+rsync -avz ${DEST_DIR}/mastodon_backup.tar.gz ${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_DIR} # Using compression during transfer
+
+# Step 6: Remove local compressed backup file
+rm ${DEST_DIR}/mastodon_backup.tar.gz
+
+# Step 7: Move log files to /home/mastodon/logs
+mv $LOG_FILE /home/mastodon/logs/backup_${TIMESTAMP}.log
+
+# End of Part 1: Setup, checks, and transfer.
+echo "Step 1-7 completed. Proceed with Part 2 to install Glitch-Soc." >> $LOG_FILE
diff --git a/p2.sh b/p2.sh
new file mode 100755
index 0000000..0b6f599
--- /dev/null
+++ b/p2.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+# Function to print dry-run actions and log them
+dry_run_echo() {
+ if [ "$DRY_RUN" = true ]; then
+ echo "Dry run: $1"
+ else
+ eval $1
+ STATUS=$?
+ if [ $STATUS -eq 0 ]; then
+ echo "Success: $1"
+ else
+ echo "Failure: $1"
+ echo "$1 failed" >> "$LOG_FILE"
+ exit 1 # Optionally exit on failure
+ fi
+ fi
+}
+# Configuration
+REMOTE_USER="root"
+REMOTE_HOST="38.102.127.167" # New server IP
+REMOTE_DIR="/home/mastodon"
+PG_DB_NAME="mastodon_production"
+PG_USER="mastodon"
+PG_HOST="38.102.127.174"
+PG_PORT="5432"
+DRY_RUN=false # Set to true for dry-run, false for actual migration
+TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S") # p2 runs standalone, so it needs its own timestamp (was unset here)
+LOG_FILE="$(pwd)/migration_checklist_${TIMESTAMP}.log"
+
+# Check if a dry run is requested
+if [[ "$1" == "--dry-run" ]]; then
+ DRY_RUN=true
+ echo "Dry run mode activated."
+else
+ echo "Running the migration for real."
+fi
+
+# Step 1: Install Glitch-Soc dependencies on the new server
+echo "Installing dependencies for Glitch-Soc on the new server..."
+dry_run_echo "ssh root@${REMOTE_HOST} 'apt update && apt upgrade -y && apt install -y git curl wget vim unzip sudo build-essential libpq-dev libssl-dev libreadline-dev zlib1g-dev libyaml-dev libcurl4-openssl-dev libffi-dev libgdbm-dev nginx postgresql postgresql-contrib nodejs yarn ruby-full certbot python3-certbot-nginx'"
+
+# Step 2: Clone Glitch-Soc and install
+echo "Cloning Glitch-Soc repository..."
+dry_run_echo "ssh root@${REMOTE_HOST} 'git clone https://github.com/glitch-soc/glitch-soc.git /home/mastodon/live'"
+
+echo "Installing Mastodon dependencies on the new server..."
+dry_run_echo "ssh root@${REMOTE_HOST} 'cd /home/mastodon/live && bundle install --deployment'"
+
+echo "Running Mastodon asset precompilation..."
+dry_run_echo "ssh root@${REMOTE_HOST} 'cd /home/mastodon/live && RAILS_ENV=production bundle exec rake assets:precompile'"
+
+echo "Setting up Mastodon services..."
+dry_run_echo "ssh root@${REMOTE_HOST} 'systemctl enable mastodon-web mastodon-sidekiq mastodon-streaming && systemctl start mastodon-web mastodon-sidekiq mastodon-streaming'"
+
+# Step 3: Test if Mastodon and Nginx are running correctly
+echo "Checking if Nginx and Mastodon are running..."
+dry_run_echo "ssh root@${REMOTE_HOST} 'curl --silent --head --fail http://localhost' || echo 'Nginx or Mastodon is not responding'"
+dry_run_echo "ssh root@${REMOTE_HOST} 'ps aux | grep mastodon' || echo 'Mastodon process is not running'"
+dry_run_echo "ssh root@${REMOTE_HOST} 'systemctl status nginx' || echo 'Nginx is not running'"
+
+# Step 4: Test Database and S3 access
+echo "Verifying database and object storage access on the new server..."
+dry_run_echo "ssh root@${REMOTE_HOST} 'psql -U mastodon -h $PG_HOST -d $PG_DB_NAME -c \"SELECT 1;\"' || echo 'Database connection failed'"
+dry_run_echo "ssh root@${REMOTE_HOST} 'curl --silent --head --fail \"https://chatwithus-live.us-east-1.linodeobjects.com\"' || echo 'S3 storage is not reachable'"
+
+# Step 5: Clean up backup directories
+echo "Cleaning up backup directory on the new server..."
+dry_run_echo "ssh root@${REMOTE_HOST} 'rm -rf /home/mastodon/backup/*'"
+
+# Step 6: Final Check
+echo "Final check: Ensure DNS is updated and pointing to new IP."
+echo "Check DNS configuration and ensure it points to $REMOTE_HOST."
+
+echo "Migration (Part 2) completed."
diff --git a/perms.sh b/perms.sh
new file mode 100755
index 0000000..30ba756
--- /dev/null
+++ b/perms.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Safe and resumable chmod script with progress output
+
+TARGET_DIR="/mnt/raid5"
+LOGFILE="$HOME/chmod_resume_$(date '+%Y%m%d-%H%M').log"
+INTERVAL=500
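+
+# chmod o+X is idempotent, so the script is safe to interrupt and re-run;
+# the -perm filter below keeps re-runs from touching fully-permissioned dirs.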
+
+echo "πŸ”§ Starting permission normalization on $TARGET_DIR"
+echo "Logging to $LOGFILE"
+echo "Started at $(date)" >> "$LOGFILE"
+
+i=0
+# Process substitution keeps the loop (and the $i counter) in the parent shell;
+# piping find into while would run the loop in a subshell and lose the count.
+while read -r dir; do
+    chmod o+X "$dir"
+    echo "βœ”οΈ $dir" >> "$LOGFILE"
+    ((i++))
+    if ((i % INTERVAL == 0)); then
+        echo "⏳ Processed $i directories so far..."
+    fi
+done < <(find "$TARGET_DIR" -type d -not -perm -005)
+
+echo "βœ… Completed at $(date)" >> "$LOGFILE"
+echo "βœ… chmod finished. Total: $i directories."
diff --git a/pull_health_everywhere_ntp.sh b/pull_health_everywhere_ntp.sh
new file mode 100755
index 0000000..3ae6ebd
--- /dev/null
+++ b/pull_health_everywhere_ntp.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+# === CONFIG ===
+REMOTE_USER="doc"
+BOT_TOKEN="8178867489:AAH0VjN7VnZSCIWasSz_y97iBLLjPJA751k"
+CHAT_ID="1559582356"
+TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
+LOGFILE="$HOME/krang-logs/health-$(date '+%Y%m%d-%H%M').log"
+
+# Thresholds
+SWAP_LIMIT_MB=512
+LOAD_LIMIT=4.0
+
+mkdir -p "$HOME/krang-logs"
+
+SERVERS=(
+ thevault.sshjunkie.com
+ zcluster.technodrome1.sshjunkie.com
+ zcluster.technodrome2.sshjunkie.com
+ shredder.sshjunkie.com
+ chatwithus.live
+)
+
+SUMMARY="πŸ“‘ Krang System Health Report - $TIMESTAMP
+
+"
+
+for HOST in "${SERVERS[@]}"; do
+ echo "πŸ” Collecting from $HOST..."
+
+ DATA=$(ssh "$REMOTE_USER@$HOST" bash -s << 'EOF'
+HOST=$(hostname)
+MEM=$(free -h | awk '/Mem:/ {print $4 " free"}')
+SWAP_RAW=$(free -m | awk '/Swap:/ {print $3}')
+SWAP="$SWAP_RAW Mi used"
+DISK=$(df -h / | awk 'NR==2 {print $4 " free"}')
+LOAD=$(uptime | awk -F'load average:' '{print $2}' | cut -d, -f1 | xargs)
+APACHE=$(systemctl is-active apache2 2>/dev/null || systemctl is-active httpd 2>/dev/null)
+[ "$APACHE" = "active" ] && APACHE_STATUS="βœ… Apache running" || APACHE_STATUS="❌ Apache not running"
+
+echo "$HOST|$MEM|$SWAP_RAW|$SWAP|$DISK|$LOAD|$APACHE_STATUS"
+EOF
+)
+
+ IFS='|' read -r H MEM SWAP_MB SWAP_HUMAN DISK LOAD1 APACHE_STATUS <<< "$DATA"
+
+ ALERTS=""
+ if (( SWAP_MB > SWAP_LIMIT_MB )); then
+ ALERTS+="⚠️ HIGH SWAP ($SWAP_HUMAN)
+"
+ fi
+
+ LOAD_INT=$(awk "BEGIN {print ($LOAD1 > $LOAD_LIMIT) ? 1 : 0}")
+ if [ "$LOAD_INT" -eq 1 ]; then
+ ALERTS+="⚠️ HIGH LOAD ($LOAD1)
+"
+ fi
+
+ ALERTS_MSG=""
+ [ -n "$ALERTS" ] && ALERTS_MSG="🚨 ALERTS:
+$ALERTS"
+
+ SUMMARY+="πŸ–₯️ $H
+β€’ Mem: $MEM
+β€’ Swap: $SWAP_HUMAN
+β€’ Disk: $DISK
+β€’ Load: $LOAD1
+β€’ $APACHE_STATUS
+$ALERTS_MSG
+
+"
+done
+
+# === KRANG CLOCK ACCURACY CHECK ===
+NTP_RESULT=$(ntpdate -q time.google.com 2>&1)
+OFFSET=$(echo "$NTP_RESULT" | awk '/offset/ {print $10}')
+OFFSET_MS=$(awk -v off="$OFFSET" 'BEGIN {if (off != "") printf "%.0f", off * 1000}')
+
+if [[ -n "$OFFSET_MS" ]]; then
+ if (( OFFSET_MS > 500 || OFFSET_MS < -500 )); then
+ # Auto-correct the system clock
+ CORRECTION=$(ntpdate -u time.google.com 2>&1)
+ SUMMARY+="πŸ› οΈ Auto-corrected Krang clock via ntpdate: $CORRECTION
+"
+
+ SUMMARY+="πŸ•°οΈ Krang Clock Offset: ${OFFSET_MS}ms β€” ⚠️ OUT OF SYNC
+"
+ else
+ SUMMARY+="πŸ•°οΈ Krang Clock Offset: ${OFFSET_MS}ms β€” βœ… SYNCHRONIZED
+"
+ fi
+else
+ SUMMARY+="πŸ•°οΈ Krang Clock Check: ❌ FAILED to retrieve offset.
+"
+fi
+
+# Log to file
+echo -e "$SUMMARY" > "$LOGFILE"
+
+# Send to Telegram
+curl -s -X POST https://api.telegram.org/bot$BOT_TOKEN/sendMessage -d chat_id="$CHAT_ID" -d text="$SUMMARY"
diff --git a/pushandbuild.sh b/pushandbuild.sh
new file mode 100755
index 0000000..9f25c1c
--- /dev/null
+++ b/pushandbuild.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# push_and_build.sh β€” Build Python app on Shredder w/o installing or bundling .env
+
+read -p "Enter the name of the app (folder name, e.g., radiotoot): " APP_NAME
+read -p "Enter the main script filename (e.g., app.py): " MAIN_SCRIPT
+
+REMOTE_HOST="shredder.sshjunkie.com"
+REMOTE_BASE="/assets/clientapps"
+LOCAL_PATH="/home/doc/$APP_NAME"
+REMOTE_PATH="$REMOTE_BASE/$APP_NAME"
+
+# Double-check local folder
+if [ ! -d "$LOCAL_PATH" ]; then
+ echo "[-] Local app path $LOCAL_PATH does not exist."
+ exit 1
+fi
+
+echo "[*] Syncing $APP_NAME to $REMOTE_HOST (excluding .env and venv)..."
+rsync -av --exclude 'venv' --exclude 'dist' --exclude '__pycache__' \
+ --exclude '*.spec' --exclude '.env' \
+ "$LOCAL_PATH/" "doc@$REMOTE_HOST:$REMOTE_PATH/"
+
+echo "[*] Triggering remote build on $REMOTE_HOST..."
+ssh doc@$REMOTE_HOST bash -c "'
+ set -e
+ cd $REMOTE_PATH
+ echo \"[*] Rebuilding venv...\"
+ python3 -m venv venv
+ source venv/bin/activate
+ pip install -r requirements.txt
+ pip install pyinstaller
+ echo \"[*] Building binary...\"
+ pyinstaller --onefile --name=$APP_NAME \
+ --add-data \"templates:templates\" \
+ --add-data \"migrations:migrations\" \
+ $MAIN_SCRIPT
+ echo \"[+] Build complete. Binary available in: $REMOTE_PATH/dist/$APP_NAME\"
+'"
+
+echo "[βœ“] Done. You can test the binary at Shredder:$REMOTE_PATH/dist/$APP_NAME"
diff --git a/restore.sh b/restore.sh
new file mode 100755
index 0000000..55efa02
--- /dev/null
+++ b/restore.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+# Configuration
+REMOTE_SERVER="root@offsite.doctatortot.com"
+REMOTE_BACKUP_DIR="/mnt/backup1/mastodon"
+LOCAL_RESTORE_DIR="/home/mastodon/restore"
+MASTODON_DIR="/home/mastodon/live"
+PG_DB_NAME="mastodon_production"
+PG_USER="mastodon"
+PG_HOST="" # Leave empty for local socket connection
+PG_PORT="5432"
+
+# Create the local restore directory if it doesn't exist
+mkdir -p "$LOCAL_RESTORE_DIR" || { echo "Failed to create restore directory"; exit 1; }
+
+# Find the latest backup file on the remote server
+echo "Finding the latest backup file on the remote server..."
+LATEST_BACKUP=$(ssh $REMOTE_SERVER "ls -t $REMOTE_BACKUP_DIR/mastodon_backup_*.tar.gz | head -n 1")
+
+if [ -z "$LATEST_BACKUP" ]; then
+ echo "No backup files found on the remote server."
+ exit 1
+fi
+
+echo "Latest backup file found: $LATEST_BACKUP"
+
+# Transfer the latest backup file to the local server
+echo "Transferring the latest backup file to the local server..."
+scp "$REMOTE_SERVER:$LATEST_BACKUP" "$LOCAL_RESTORE_DIR" || { echo "Failed to transfer backup file"; exit 1; }
+
+# Extract the backup file
+BACKUP_FILE=$(basename "$LATEST_BACKUP")
+echo "Extracting the backup file..."
+tar -xzf "$LOCAL_RESTORE_DIR/$BACKUP_FILE" -C "$LOCAL_RESTORE_DIR" || { echo "Failed to extract backup file"; exit 1; }
+
+# Stop Mastodon services
+echo "Stopping Mastodon services..."
+sudo systemctl stop mastodon-web mastodon-sidekiq mastodon-streaming || { echo "Failed to stop Mastodon services"; exit 1; }
+
+# Restore Mastodon files
+echo "Restoring Mastodon files..."
+rsync -av --delete "$LOCAL_RESTORE_DIR"/mastodon_backup_*/mastodon_files/ "$MASTODON_DIR" || { echo "rsync failed"; exit 1; }
+
+# Restore PostgreSQL database
+echo "Restoring PostgreSQL database..."
+PG_DUMP_FILE=$(find "$LOCAL_RESTORE_DIR" -name "mastodon_db_*.sql")
+if [ -z "$PG_DUMP_FILE" ]; then
+ echo "Database dump file not found."
+ exit 1
+fi
+
+psql -U "$PG_USER" -d "$PG_DB_NAME" -f "$PG_DUMP_FILE" || { echo "psql restore failed"; exit 1; }
+
+# Run database migrations
+echo "Running database migrations..."
+cd "$MASTODON_DIR"
+RAILS_ENV=production bundle exec rails db:migrate || { echo "Database migrations failed"; exit 1; }
+
+# Start Mastodon services
+echo "Starting Mastodon services..."
+sudo systemctl start mastodon-web mastodon-sidekiq mastodon-streaming || { echo "Failed to start Mastodon services"; exit 1; }
+
+# Clean up
+echo "Cleaning up..."
+rm -rf "$LOCAL_RESTORE_DIR"/mastodon_backup_* || { echo "Failed to clean up restore files"; exit 1; }
+rm "$LOCAL_RESTORE_DIR/$BACKUP_FILE" || { echo "Failed to remove backup file"; exit 1; }
+
+echo "Restore completed successfully."
diff --git a/retention.sh b/retention.sh
new file mode 100755
index 0000000..b9bf8a7
--- /dev/null
+++ b/retention.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+SRC_BASE="/mnt/convert/archives"
+DEST_BASE="/mnt/5tb/archives"
+RETENTION_MONTHS=3
+TODAY=$(date +%s)
+
+LOG_FILE="/var/log/archive_retention.log"
+
+# Log start of run
+echo "=== Archive Sync: $(date) ===" >> "$LOG_FILE"
+
+# Init counters
+files_checked=0
+files_archived=0
+files_deleted=0
+
+# Traverse all subfolders
+# (process substitution at the matching 'done' keeps the counters out of a subshell)
+while read -r file; do
+ ((files_checked++))
+
+ filename=$(basename "$file")
+ relative_path=$(realpath --relative-to="$SRC_BASE" "$file")
+ subfolder=$(dirname "$relative_path")
+ dest_folder="$DEST_BASE/$subfolder"
+ dest_file="$dest_folder/$filename"
+
+ # --- Date extraction logic (supports YYYY-MM-DD or YYYYMMDD) ---
+ file_date=$(echo "$filename" | grep -oP '\d{4}-\d{2}-\d{2}')
+
+ if [ -z "$file_date" ]; then
+ raw_date=$(echo "$filename" | grep -oP '\d{8}')
+ if [ ! -z "$raw_date" ]; then
+ file_date="${raw_date:0:4}-${raw_date:4:2}-${raw_date:6:2}"
+ fi
+ fi
+
+ if [ -z "$file_date" ]; then
+ echo "Skipping (no valid date found): $filename" >> "$LOG_FILE"
+ continue
+ fi
+
+ file_epoch=$(date -d "$file_date" +%s 2>/dev/null)
+ if [ -z "$file_epoch" ]; then
+ echo "Skipping (invalid date format): $filename" >> "$LOG_FILE"
+ continue
+ fi
+
+ age_months=$(( (TODAY - file_epoch) / 2592000 ))
+
+ # Make sure destination folder exists
+ mkdir -p "$dest_folder"
+
+ if [ "$age_months" -le "$RETENTION_MONTHS" ]; then
+ if [ ! -f "$dest_file" ]; then
+ echo "Archiving: $filename β†’ $dest_folder" >> "$LOG_FILE"
+ cp "$file" "$dest_file"
+ ((files_archived++))
+ fi
+ else
+ if [ -f "$dest_file" ]; then
+ echo "Deleting expired: $filename" >> "$LOG_FILE"
+ rm "$dest_file"
+ ((files_deleted++))
+ fi
+ fi
+done < <(find "$SRC_BASE" -type f -name '*.mp3')
+
+# Final summary log
+echo "Checked: $files_checked | Archived: $files_archived | Deleted: $files_deleted" >> "$LOG_FILE"
+echo "" >> "$LOG_FILE"
diff --git a/rsync_zfs_sync_helper.sh b/rsync_zfs_sync_helper.sh
new file mode 100755
index 0000000..685eb88
--- /dev/null
+++ b/rsync_zfs_sync_helper.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+# sync_to_vault.sh
+# Rsync + ZFS sanity tool with built-in slash wisdom
+
+set -euo pipefail
+
+# === CONFIG ===
+VAULT_HOST="thevault.sshjunkie.com"
+BASE_TARGET="/nexus/miniodata/assets"
+
+# === USAGE ===
+if [[ $# -lt 2 ]]; then
+ echo "Usage: $0 <source_dir> <bucket_name>"
+ echo "Example: $0 /mnt/backup3/tempshit/genesisassets/ genesisassets-secure"
+ exit 1
+fi
+
+SRC="$1"
+BUCKET="$2"
+DST="${BASE_TARGET}/${BUCKET}/"
+
+# === WISDOM ===
+echo "🧘 Trailing slashes, my friend. β€” John Northrup"
+echo
+
+if [[ "$SRC" != */ ]]; then
+ echo "⚠️ Warning: Source path does not end in a slash."
+ echo " You may be copying the folder itself instead of its contents."
+ echo " You probably want: ${SRC}/"
+ echo
+fi
+
+# === VERIFY SOURCE ===
+if [[ ! -d "$SRC" ]]; then
+ echo "❌ Source directory does not exist: $SRC"
+ exit 1
+fi
+
+# === CREATE ZFS DATASET ON REMOTE IF MISSING ===
+echo "πŸ” Ensuring dataset exists on $VAULT_HOST..."
+ssh root@$VAULT_HOST "zfs list nexus/miniodata/assets/$BUCKET" >/dev/null 2>&1 || {
+ echo "πŸ“ Creating dataset nexus/miniodata/assets/$BUCKET on $VAULT_HOST"
+ ssh root@$VAULT_HOST "zfs create nexus/miniodata/assets/$BUCKET"
+}
+
+# === RSYNC ===
+echo "πŸš€ Starting rsync from $SRC to $VAULT_HOST:$DST"
+rsync -avhP "$SRC" root@$VAULT_HOST:"$DST"
+
+# === SNAPSHOT ===
+SNAPNAME="rsync_$(date +%Y%m%d_%H%M%S)"
+echo "πŸ“Έ Creating post-sync snapshot: $SNAPNAME"
+ssh root@$VAULT_HOST "zfs snapshot nexus/miniodata/assets/$BUCKET@$SNAPNAME"
+
+# === DONE ===
+echo "βœ… Sync and snapshot complete: $BUCKET@$SNAPNAME"
diff --git a/run_prune_from_krang.sh b/run_prune_from_krang.sh
new file mode 100755
index 0000000..0759191
--- /dev/null
+++ b/run_prune_from_krang.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# === CONFIG ===
+REMOTE="root@thevault.sshjunkie.com"
+SCRIPT_PATH="/root/prune_snapshots.sh"
+LOG_FILE="/home/doc/genesis-tools/prune_trigger.log"
+DRY_RUN=false
+
+[[ "$1" == "--dry-run" ]] && DRY_RUN=true
+
+TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
+echo "[$TIMESTAMP] Initiating snapshot prune on The Vault" >> "$LOG_FILE"
+
+if $DRY_RUN; then
+ ssh "$REMOTE" "bash $SCRIPT_PATH --dry-run"
+else
+ ssh "$REMOTE" "bash $SCRIPT_PATH"
+fi
diff --git a/snapshot_send_to_vault.sh b/snapshot_send_to_vault.sh
new file mode 100755
index 0000000..387aad1
--- /dev/null
+++ b/snapshot_send_to_vault.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# snapshot_and_send_to_vault.sh
+# Create a ZFS snapshot of /deadbeef/genesis-tools and send it to the vault
+
+set -euo pipefail
+
+# βš™οΈ Config
+POOL="deadbeef"
+DATASET="genesis-tools"
+REMOTE_USER="root"
+REMOTE_HOST="thevault.bounceme.net"
+REMOTE_DATASET="backups/krang"
+
+# πŸ—“οΈ Create snapshot name
+DATE=$(date +%F)
+SNAPSHOT_NAME="${DATE}"
+
+echo "πŸ”§ Creating snapshot ${POOL}/${DATASET}@${SNAPSHOT_NAME}..."
+sudo zfs snapshot ${POOL}/${DATASET}@${SNAPSHOT_NAME}
+
+echo "πŸš€ Sending snapshot to ${REMOTE_HOST}..."
+sudo zfs send ${POOL}/${DATASET}@${SNAPSHOT_NAME} | \
+ ssh ${REMOTE_USER}@${REMOTE_HOST} sudo zfs receive -F ${REMOTE_DATASET}
+
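+# NOTE: each run ships a full stream. An incremental follow-up (assumes the
+# previous snapshot still exists on both sides) would look like:
+#   zfs send -i ${POOL}/${DATASET}@PREVIOUS ${POOL}/${DATASET}@${SNAPSHOT_NAME} \
+#     | ssh ${REMOTE_USER}@${REMOTE_HOST} sudo zfs receive ${REMOTE_DATASET}
+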
+echo "βœ… Snapshot ${SNAPSHOT_NAME} replicated to ${REMOTE_HOST}:${REMOTE_DATASET}"
+
+echo "πŸŽ‰ All done!"
diff --git a/startemup.sh b/startemup.sh
new file mode 100755
index 0000000..d0f5f17
--- /dev/null
+++ b/startemup.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+# Launches Python virtual environments in separate screen sessions or manages their status
+
+declare -A VENV_APPS=(
+    [archivecontrol]="recordit2.py"
+    [archivelist]="recordit2.py"
+    [recordtheshow]="app.py"
+    [hostingtoot]="app.py"
+    # radiotoot was listed twice (app.py, then live.py); in an associative
+    # array the last assignment wins, so live.py is the effective entry
+    [radiotoot]="live.py"
+)
+
+SCRIPT_BASE="/home/doc/genesis-tools"
+VENV_BASE="/home/doc"
+
+if [[ "$1" == "--check" ]]; then
+ echo "πŸ” Checking screen session health..."
+ for name in "${!VENV_APPS[@]}"; do
+ if screen -list | grep -q "\.${name}[[:space:]]"; then
+ echo "πŸ“¦ $name βœ… Running"
+ else
+ echo "πŸ“¦ $name ❌ Not running"
+ fi
+ done
+ exit 0
+fi
+
+if [[ "$1" == "--stop" ]]; then
+ echo "πŸ›‘ Stopping all screen sessions..."
+ for name in "${!VENV_APPS[@]}"; do
+ if screen -list | grep -q "\.${name}[[:space:]]"; then
+ screen -S "$name" -X quit && echo "πŸ›‘ $name stopped"
+ else
+ echo "⚠️ $name not running"
+ fi
+ done
+ exit 0
+fi
+
+if [[ "$1" == "--restart" ]]; then
+ echo "πŸ”„ Restarting all apps..."
+ "$0" --stop
+ sleep 2
+ "$0"
+ exit 0
+fi
+
+for name in "${!VENV_APPS[@]}"; do
+ script_name="${VENV_APPS[$name]}"
+ script_path="$SCRIPT_BASE/$name/$script_name"
+
+ # Use 'toot' venv for both radiotoot and hostingtoot
+ if [[ "$name" == "radiotoot" || "$name" == "hostingtoot" ]]; then
+ venv_activate="$VENV_BASE/toot/bin/activate"
+ else
+ venv_activate="$VENV_BASE/$name/bin/activate"
+ fi
+
+ if [[ -f "$script_path" && -f "$venv_activate" ]]; then
+ echo "πŸš€ Launching $name in screen session..."
+ screen -S "$name" -dm bash -c "source '$venv_activate'; cd '$SCRIPT_BASE/$name'; python3 '$script_name'"
+ else
+ echo "⚠️ Script or venv not found for $name"
+ fi
+done
+
+echo "βœ… All venv apps launched in screen sessions."
diff --git a/sync-to-vault.sh b/sync-to-vault.sh
new file mode 100755
index 0000000..7f01a69
--- /dev/null
+++ b/sync-to-vault.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# === CONFIG ===
+SRC_HOST="shredderv1"
+SRC_BASE="/mnt/raid5/minio-data"
+DEST_HOST="root@thevault.sshjunkie.com"
+LOG="/home/doc/genesis-tools/vault_sync.log"
+TG_TOKEN="8178867489:AAH0VjN7VnZSCIWasSz_y97iBLLjPJA751k"
+TG_CHAT_ID="1559582356"
+
+declare -A BUCKETS_TO_DATASETS=(
+ [genesisassets]="nexus/genesisassets-secure"
+ [genesislibrary]="nexus/genesislibrary-secure"
+ [assets_archives]="nexus/genesisarchives-secure"
+ [assets_mastodon]="nexus/assets_mastodon"
+ [assets_azuracast]="nexus/assets_azuracast"
+)
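+
+# NOTE (assumptions): rsync cannot copy between two remote endpoints in one
+# hop, so the ${SRC_HOST}: prefix only works if an ssh-config alias effectively
+# makes one side local; likewise the dataset names above are used as rsync
+# paths relative to root's home on the vault β€” mounted datasets would normally
+# be addressed as /nexus/... instead.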
+
+echo "[$(date)] πŸ” Starting MinIO-to-Vault sync job..." >> "$LOG"
+
+for bucket in "${!BUCKETS_TO_DATASETS[@]}"; do
+ src="${SRC_HOST}:${SRC_BASE}/${bucket}/"
+ dest="${BUCKETS_TO_DATASETS[$bucket]}/"
+
+ echo "[*] Syncing $bucket β†’ $dest" >> "$LOG"
+ rsync -aHAXv --delete "$src" "$DEST_HOST:$dest" >> "$LOG" 2>&1
+
+ curl -s -X POST "https://api.telegram.org/bot$TG_TOKEN/sendMessage" \
+ -d chat_id="$TG_CHAT_ID" \
+ -d text="βœ… Sync complete: $bucket β†’ ${BUCKETS_TO_DATASETS[$bucket]}"
+done
+
+echo "[$(date)] βœ… All MinIO buckets synced to The Vault." >> "$LOG"
diff --git a/sync-trigger.sh b/sync-trigger.sh
new file mode 100755
index 0000000..fff1f67
--- /dev/null
+++ b/sync-trigger.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+# === CONFIG ===
+REMOTE_HOST="shredder.sshjunkie.com"
+REMOTE_USER="doc"
+REMOTE_SCRIPT="/home/doc/sync.sh"
+LOG_TAG="[Krang β†’ SPL Sync]"
+
+# === Mastodon Alert Settings ===
+MASTODON_INSTANCE="https://chatwithus.live"
+ACCESS_TOKEN="07w3Emdw-cv_TncysrNU8Ed_sHJhwtnvKmnLqKlHmKA"
+TOOT_VISIBILITY="public"
+
+# === Telegram Settings ===
+TELEGRAM_BOT_TOKEN="8178867489:AAH0VjN7VnZSCIWasSz_y97iBLLjPJA751k"
+TELEGRAM_CHAT_ID="1559582356"
+
+# === Execution ===
+echo "$LOG_TAG Triggering remote sync..."
+OUTPUT=$(ssh ${REMOTE_USER}@${REMOTE_HOST} "${REMOTE_SCRIPT}" 2>&1)
+
+if echo "$OUTPUT" | grep -q "All syncs finished"; then
+ echo "$LOG_TAG βœ… Sync complete."
+
+ # Mastodon alert
+ curl -s -X POST "$MASTODON_INSTANCE/api/v1/statuses" \
+ -H "Authorization: Bearer $ACCESS_TOKEN" \
+ -d "status=βœ… SPL Sync completed successfully via Krang" \
+ -d "visibility=$TOOT_VISIBILITY" >/dev/null
+
+ # Telegram alert
+ curl -s -X POST "https://api.telegram.org/bot$TELEGRAM_BOT_TOKEN/sendMessage" \
+ -d "chat_id=$TELEGRAM_CHAT_ID" \
+ -d "text=βœ… SPL Sync completed successfully from Krang." >/dev/null
+else
+ echo "$LOG_TAG ❌ Sync may have failed. Check logs."
+
+ # Failure alerts
+ curl -s -X POST "$MASTODON_INSTANCE/api/v1/statuses" \
+ -H "Authorization: Bearer $ACCESS_TOKEN" \
+ -d "status=❌ SPL Sync failed from Krang. Check logs." \
+ -d "visibility=$TOOT_VISIBILITY" >/dev/null
+
+ curl -s -X POST "https://api.telegram.org/bot$TELEGRAM_BOT_TOKEN/sendMessage" \
+ -d "chat_id=$TELEGRAM_CHAT_ID" \
+ -d "text=❌ SPL Sync failed from Krang. Manual check needed." >/dev/null
+fi
+
diff --git a/sync.sh b/sync.sh
new file mode 100755
index 0000000..0b52969
--- /dev/null
+++ b/sync.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# Setup alias (even if it already exists)
+mc alias set minio http://localhost:9000 genesisadmin MutationXv3! || true
+
+echo "[*] Syncing genesisassets β†’ Q:"
+mc mirror \
+  --overwrite \
+  --remove \
+  --exclude "/System Volume Information/**" \
+  --exclude '/$RECYCLE.BIN/**' \
+  --exclude "**/Thumbs.db" \
+  minio/genesisassets /mnt/spl/qdrive || echo "[!] Q: sync completed with warnings"
+
+echo "[*] Syncing genesislibrary β†’ R:"
+mc mirror \
+  --overwrite \
+  --remove \
+  --exclude "/System Volume Information/**" \
+  --exclude '/$RECYCLE.BIN/**' \
+  --exclude "**/Thumbs.db" \
+  minio/genesislibrary /mnt/spl/rdrive || echo "[!] R: sync completed with warnings"
+
+echo "[βœ“] All syncs finished"
diff --git a/sync_everything_v3.sh b/sync_everything_v3.sh
new file mode 100755
index 0000000..6cfdca4
--- /dev/null
+++ b/sync_everything_v3.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+
+# GenesisSync: SPL Remote Sync Runner (Krang Orchestrated)
+# Krang orchestrates sync by SSHing into Shredder, where the SPL shares live.
+
+SHREDDER_HOST="shredder.sshjunkie.com"
+SHREDDER_USER="doc"
+REMOTE_SCRIPT="/tmp/genesis_sync_remote.sh"
+LOGFILE="/home/doc/genesis_sync_spl.log"
+
+# Telegram settings
+TELEGRAM_BOT_TOKEN="AAFrXrxWVQyGxR6sBOKFPchQ3BsMdgqIZsY"
+TELEGRAM_CHAT_ID="8127808884"
+
+send_telegram() {
+ local message="$1"
+ curl -s -X POST "https://api.telegram.org/bot$TELEGRAM_BOT_TOKEN/sendMessage" \
+ -d chat_id="$TELEGRAM_CHAT_ID" \
+ -d text="$message" \
+ -d parse_mode="Markdown" > /dev/null
+}
+
+start_time=$(date +%s)
+start_date=$(date)
+echo "[GenesisSync] Starting remote SPL sync at $start_date" | tee -a "$LOGFILE"
+send_telegram "πŸ›  *GenesisSync SPL (Remote) Started*
+Time: $start_date" || true
+
+# Generate the remote script that Shredder will execute
+cat << 'EOF' > /tmp/genesis_sync_remote.sh
+#!/bin/bash
+MOUNT_BASE="/mnt/spl"
+SPL_HOST="38.102.127.163"
+SPL_USER="Administrator"
+SPL_PASS="MutationXv3!"
+HOT_BUCKET="genesis-hot:"
+COLD_BUCKET="genesis-cold:"
+LOGFILE="/tmp/genesis_sync_remote.log"
+
+declare -A DIRS=(
+ [splmedia]="splmedia"
+ [splshows]="splshows"
+ [splassets]="splassets"
+)
+
+echo "[GenesisSync:Shredder] Starting local operations at $(date)" > "$LOGFILE"
+
+for key in "${!DIRS[@]}"; do
+ share_name="${DIRS[$key]}"
+ mount_point="$MOUNT_BASE/$share_name"
+ local_path="$mount_point"
+
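+    # NOTE: local_path is identical to mount_point above, so the two rsyncs
+    # below copy the share onto itself, and the rclone mirrors run after the
+    # umount against an empty directory β€” a separate local staging path for
+    # local_path was presumably intended.
+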
+ mkdir -p "$mount_point"
+
+ echo "[β†’] Mounting //$SPL_HOST/$share_name to $mount_point" >> "$LOGFILE"
+ mount -t cifs "//$SPL_HOST/$share_name" "$mount_point" -o username="$SPL_USER",password="$SPL_PASS",vers=3.0 || echo "Mount failed for $share_name" >> "$LOGFILE"
+
+ echo "[β†’] Rsync SPL ➝ Shredder: $key" >> "$LOGFILE"
+ rsync -avz --delete "$mount_point/" "$local_path/" >> "$LOGFILE" 2>&1
+
+ echo "[β†’] Rsync Shredder ➝ SPL (reverse): $key" >> "$LOGFILE"
+ rsync -avzu "$local_path/" "$mount_point/" >> "$LOGFILE" 2>&1
+
+ echo "[β†’] Unmounting $mount_point" >> "$LOGFILE"
+ umount "$mount_point"
+
+ echo "[β†’] Mirror ➝ ProtocolY: $key" >> "$LOGFILE"
+ rclone sync "$local_path/" "$HOT_BUCKET/$share_name" --transfers=8 --log-file="$LOGFILE" --log-level INFO
+
+ echo "[β†’] Mirror ➝ ProtocolZ: $key" >> "$LOGFILE"
+ rclone sync "$local_path/" "$COLD_BUCKET/$share_name" --transfers=4 --log-file="$LOGFILE" --log-level INFO
+done
+
+echo "[βœ“] Shredder sync done at $(date)" >> "$LOGFILE"
+EOF
+
+# Push the script to Shredder
+scp /tmp/genesis_sync_remote.sh "$SHREDDER_USER@$SHREDDER_HOST:$REMOTE_SCRIPT" > /dev/null 2>&1
+ssh "$SHREDDER_USER@$SHREDDER_HOST" "chmod +x $REMOTE_SCRIPT && sudo $REMOTE_SCRIPT"
+
+# Retrieve the remote log and append it, rather than clobbering the local log
+scp "$SHREDDER_USER@$SHREDDER_HOST:/tmp/genesis_sync_remote.log" /tmp/genesis_sync_remote.log > /dev/null 2>&1
+cat /tmp/genesis_sync_remote.log >> "$LOGFILE" 2>/dev/null
+
+end_time=$(date +%s)
+duration=$((end_time - start_time))
+end_date=$(date)
+echo "[βœ“] GenesisSync (Remote) completed in ${duration}s at $end_date" | tee -a "$LOGFILE"
+send_telegram "βœ… *GenesisSync SPL Completed (Remote)*
+Duration: ${duration}s
+Finished: $end_date" || true
+
diff --git a/tothebank.sh b/tothebank.sh
new file mode 100755
index 0000000..9522f6f
--- /dev/null
+++ b/tothebank.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Mastodon Media Audit: Find orphaned .part.* dirs & estimate space wasted
+# For mounted MinIO (e.g., /assets/minio-data/mastodon)
+
+TARGET="/assets/minio-data/mastodon/"
+LOG="/tmp/mastodon_zombie_audit.log"
+REPORT="/tmp/mastodon_zombie_report.txt"
+
+echo "[*] Auditing .part.* zombie files under: $TARGET" | tee "$LOG"
+
+# Find all part.1 or similar that are actually directories
+find "$TARGET" -type d -name "part.*" > "$REPORT"
+
+TOTAL=$(wc -l < "$REPORT")
+# xargs -d '\n' keeps paths with spaces intact (word-splitting $(cat ...) would not)
+SIZE=$(xargs -d '\n' du -shc < "$REPORT" 2>/dev/null | tail -1 | awk '{print $1}')
+
+echo "[βœ”οΈ] Found $TOTAL suspicious .part.* directories" | tee -a "$LOG"
+echo "[πŸ“¦] Estimated wasted space: $SIZE" | tee -a "$LOG"
+
+echo "Top offenders:" | tee -a "$LOG"
+head -n 20 "$REPORT" | xargs -d '\n' du -sh 2>/dev/null | sort -hr | tee -a "$LOG"
+
+echo "" | tee -a "$LOG"
+echo "🚨 To delete these, run:" | tee -a "$LOG"
+echo "  sudo xargs -d '\n' rm -rf < $REPORT" | tee -a "$LOG"
diff --git a/upgrade.sh b/upgrade.sh
new file mode 100755
index 0000000..58437b9
--- /dev/null
+++ b/upgrade.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+# ---- CONFIGURATION ----
+DOMAIN="your.mastodon.domain" # Replace this with your real domain
+ACCOUNT_USERNAME="administration"
+SCRIPT_PATH="/root/finish_upgrade.sh"
+LOGFILE="/root/mastodon_upgrade_$(date +%F_%H-%M-%S).log"
+exec > >(tee -a "$LOGFILE") 2>&1
+set -e
+
+echo "===== Mastodon 20.04 β†’ 22.04 Upgrade Starter ====="
+
+read -p "❗ Have you backed up your system and database? (yes/no): " confirmed
+if [[ "$confirmed" != "yes" ]]; then
+ echo "❌ Aborting. Please take a backup."
+ exit 1
+fi
+
+echo "πŸ”§ Updating system..."
+apt update && apt upgrade -y
+apt install update-manager-core curl -y
+
+echo "πŸ›‘ Stopping Mastodon..."
+systemctl stop mastodon-web mastodon-sidekiq mastodon-streaming
+
+echo "πŸ” Preparing post-reboot upgrade finalization..."
+
+# ---- Create finish_upgrade.sh ----
+cat << EOF > $SCRIPT_PATH
+#!/bin/bash
+LOGFILE="/root/mastodon_post_upgrade_\$(date +%F_%H-%M-%S).log"
+exec > >(tee -a "\$LOGFILE") 2>&1
+set -e
+
+echo "===== Post-Reboot Finalization Script ====="
+
+echo "πŸ”„ Restarting Mastodon services..."
+systemctl daemon-reexec
+systemctl daemon-reload
+systemctl start mastodon-web mastodon-sidekiq mastodon-streaming
+
+echo "βœ… Checking service status..."
+systemctl status mastodon-web --no-pager
+systemctl status mastodon-sidekiq --no-pager
+systemctl status mastodon-streaming --no-pager
+
+echo "🌐 Homepage check..."
+if curl --silent --fail https://$DOMAIN >/dev/null; then
+ echo "βœ… Homepage is reachable."
+else
+ echo "❌ Homepage failed to load."
+fi
+
+echo "πŸ“£ Posting announcement toot..."
+cd /home/mastodon/live
+# NB: the status text avoids apostrophes, which would terminate the
+# single-quoted bash -c string below.
+sudo -u mastodon -H bash -c '
+RAILS_ENV=production bundle exec rails runner "
+acct = Account.find_by(username: \\"$ACCOUNT_USERNAME\\")
+if acct
+  PostStatusService.new.call(acct, text: \\"βœ… Server upgrade to Ubuntu 22.04 complete. We are back online!\\")
+end
+"'
+
+echo "🧹 Cleaning up..."
+apt autoremove -y && apt autoclean -y
+
+echo "🚫 Removing rc.local to prevent rerun..."
+rm -f /etc/rc.local
+rm -f $SCRIPT_PATH
+
+echo "βœ… Post-upgrade steps complete."
+EOF
+
+chmod +x $SCRIPT_PATH
+
+# ---- Set rc.local to run after reboot ----
+cat << EOF > /etc/rc.local
+#!/bin/bash
+bash $SCRIPT_PATH
+exit 0
+EOF
+
+chmod +x /etc/rc.local
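+
+# Note: on systemd-based Ubuntu, /etc/rc.local is honored through the rc-local
+# compatibility unit, which systemd's generator activates when the file is
+# executable. If the finalizer never fires after reboot, check:
+#   systemctl status rc-local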
+
+echo ""
+echo "πŸš€ Starting do-release-upgrade..."
+sleep 3
+do-release-upgrade
diff --git a/validate_zfs.sh b/validate_zfs.sh
new file mode 100755
index 0000000..114ed02
--- /dev/null
+++ b/validate_zfs.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+# CONFIG
+ZFS_BASE="/mnt/zfs_minio"
+# Old (underscore) bucket directory names; the loop below derives the
+# hyphenated post-migration names from these.
+BUCKETS=(
+ "assets_azuracastassets"
+ "assets_genesisassets"
+ "assets_genesislibrary"
+ "assets_genesisarchives"
+ "assets_mastodon"
+)
+SAMPLE_COUNT=5
+USER="minio-user"
+GROUP="minio-user"
+
+# COLORS
+GREEN="\033[0;32m"
+RED="\033[0;31m"
+YELLOW="\033[1;33m"
+NC="\033[0m"
+
+echo "πŸ” Validating migrated MinIO buckets..."
+echo
+
+for bucket in "${BUCKETS[@]}"; do
+ OLD_PATH="${ZFS_BASE}/${bucket}"
+ NEW_BUCKET=$(echo "$bucket" | tr '_' '-')
+ NEW_PATH="${ZFS_BASE}/${NEW_BUCKET}"
+
+ echo -e "${YELLOW}=== Bucket: $bucket β†’ $NEW_BUCKET ===${NC}"
+
+ if [[ ! -d "$OLD_PATH" || ! -d "$NEW_PATH" ]]; then
+ echo -e "${RED}❌ Missing directory: ${OLD_PATH} or ${NEW_PATH}${NC}"
+ echo
+ continue
+ fi
+
+ # 1. File count check
+ old_count=$(find "$OLD_PATH" -type f | wc -l)
+ new_count=$(find "$NEW_PATH" -type f | wc -l)
+ echo "πŸ“¦ File count: $old_count (old) vs $new_count (new)"
+
+ [[ "$old_count" -eq "$new_count" ]] && \
+ echo -e "${GREEN}βœ… File count matches${NC}" || \
+ echo -e "${RED}❌ File count mismatch${NC}"
+
+ # 2. Sample checksum
+ echo "πŸ” Verifying checksums for $SAMPLE_COUNT random files..."
+ mismatch=0
+ # mapfile keeps any paths containing spaces intact
+ mapfile -t samples < <(find "$OLD_PATH" -type f | shuf -n "$SAMPLE_COUNT" 2>/dev/null)
+
+ for file in "${samples[@]}"; do
+ rel_path="${file#$OLD_PATH/}"
+ old_sum=$(sha256sum "$OLD_PATH/$rel_path" | awk '{print $1}')
+ new_sum=$(sha256sum "$NEW_PATH/$rel_path" | awk '{print $1}')
+
+ if [[ "$old_sum" != "$new_sum" ]]; then
+ echo -e "${RED}❌ Mismatch: $rel_path${NC}"
+ ((mismatch++))
+ else
+ echo -e "${GREEN}βœ” Match: $rel_path${NC}"
+ fi
+ done
+
+ [[ "$mismatch" -eq 0 ]] && \
+ echo -e "${GREEN}βœ… All sample checksums match${NC}" || \
+ echo -e "${RED}❌ $mismatch checksum mismatch(es) found${NC}"
+
+ # 3. Ownership check
+ ownership_issues=$(find "$NEW_PATH" \( ! -user "$USER" -o ! -group "$GROUP" \) | wc -l)
+ [[ "$ownership_issues" -eq 0 ]] && \
+ echo -e "${GREEN}βœ… Ownership is correct${NC}" || \
+ echo -e "${RED}❌ $ownership_issues ownership issues in $NEW_PATH${NC}"
+
+ echo
+done
+
+echo -e "${YELLOW}πŸ“Š Validation complete. Review any ❌ issues before going live with MinIO.${NC}"
diff --git a/venv-backup-script.sh b/venv-backup-script.sh
new file mode 100755
index 0000000..ac67e39
--- /dev/null
+++ b/venv-backup-script.sh
@@ -0,0 +1,85 @@
+#!/usr/bin/env bash
+set -euo pipefail # This ensures the script will stop execution if any command fails, and that unset variables will cause errors
+
+### Configuration ###
+# Setting the root directory for virtual environments
+VENV_ROOT="/home/doc"
+# Define the backup directory where backups will be stored locally
+BACKUP_DIR="$VENV_ROOT/backups"
+# Define the retention period for local backups in days. Older backups will be deleted.
+RETENTION_DAYS=7
+
+# Remote settings for syncing backups to a remote server
+# Define the SSH user for remote access
+REMOTE_USER="root"
+# Define the remote host (the server where backups will be stored)
+REMOTE_HOST="thevault.bounceme.net"
+# Define the path on the remote server where the backups will be stored
+REMOTE_PATH="/mnt/backup3/pythonvenvs"
+
+### Derived ###
+# Generate a timestamp based on the current date and time to create unique backup file names
+DATE=$(date +'%F_%H-%M-%S')
+# Define the full path of the backup file to be created locally
+BACKUP_FILE="$BACKUP_DIR/venvs_backup_$DATE.tar.gz"
+
+# Ensure that the backup directory exists, and create it if it does not
+mkdir -p "$BACKUP_DIR"
+
+# 1) Find all virtual environments in the specified root directory
+# We are searching for directories under $VENV_ROOT that contain a 'bin/activate' file
+# This file is typically present in Python virtual environments
+mapfile -t VENV_DIRS < <(
+ find "$VENV_ROOT" -maxdepth 1 -type d \
+ -exec test -f "{}/bin/activate" \; -print
+)
+
+# If no virtual environments are found, print an error and exit the script
+if [ ${#VENV_DIRS[@]} -eq 0 ]; then
+ echo "❌ No virtual environments found under $VENV_ROOT"
+ exit 1
+fi
+
+# 2) Extract the basenames of the virtual environments to use in the backup process
+VENV_NAMES=()
+for path in "${VENV_DIRS[@]}"; do
+ VENV_NAMES+=( "$(basename "$path")" )
+done
+
+# Inform the user about which virtual environments are being backed up
+echo "πŸ”„ Backing up virtual environments: ${VENV_NAMES[*]}"
+
+# Create a tarball archive of the found virtual environments
+# We use the '-C' option to change the directory to $VENV_ROOT before adding the directories
+tar czf "$BACKUP_FILE" -C "$VENV_ROOT" "${VENV_NAMES[@]}"
+
+# Notify the user that the local backup was saved successfully
+echo "βœ… Local backup saved to $BACKUP_FILE"
+
+# 3) Push the backup to the remote server using rsync
+echo "πŸ“‘ Sending backup to ${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_PATH}/"
+# -a: archive mode (preserves symbolic links, permissions, etc.)
+# -z: compress file data during the transfer
+# --progress: show progress during the transfer
+# Note: under `set -e` a separate `$?` test never runs on failure, so the
+# command is tested directly in the if instead.
+if rsync -az --progress "$BACKUP_FILE" \
+    "${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_PATH}/"; then
+  echo "βœ… Remote sync succeeded."
+else
+  echo "❌ Remote sync failed!"
+  exit 1
+fi
+
+# 4) Rotate old local backups
+# Inform the user that old backups are being deleted
+echo "πŸ—‘οΈ Removing local backups older than $RETENTION_DAYS days..."
+# The 'find' command searches for backup files older than the retention period and deletes them
+find "$BACKUP_DIR" -type f -name "venvs_backup_*.tar.gz" \
+ -mtime +$RETENTION_DAYS -delete
+
+# Notify the user that the backup and cleanup process is complete
+echo "πŸŽ‰ Backup and cleanup complete."
diff --git a/verify_minio.sh b/verify_minio.sh
new file mode 100755
index 0000000..affe981
--- /dev/null
+++ b/verify_minio.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# CONFIG
+ZFS_PATH="/assets/"
+MINIO_USER="minio-user"
+EXPECTED_BUCKETS=(
+ "assets_azuracast"
+ "assets_archives"
+ "assets_genesisassets"
+ "assets_genesislibrary"
+ "assets_mastodon"
+ "assets_teamtalkdata"
+)
+
+echo "=== Verifying ZFS MinIO Layout in $ZFS_PATH ==="
+
+for BUCKET in "${EXPECTED_BUCKETS[@]}"; do
+ BUCKET_PATH="$ZFS_PATH/$BUCKET"
+ echo "- Checking: $BUCKET_PATH"
+
+ if [ -d "$BUCKET_PATH" ]; then
+ echo " βœ… Exists"
+ OWNER=$(stat -c '%U' "$BUCKET_PATH")
+ if [ "$OWNER" == "$MINIO_USER" ]; then
+ echo " βœ… Ownership correct: $OWNER"
+ else
+ echo " ❌ Ownership incorrect: $OWNER"
+ fi
+ else
+ echo " ❌ Missing bucket directory!"
+ fi
+done
+
+echo ""
+echo "If MinIO is already running, run the following to confirm bucket visibility:"
+echo " mc alias set local http://localhost:9000 genesisadmin MutationXv3!"
+echo " mc ls local/"
diff --git a/verifypxe.sh b/verifypxe.sh
new file mode 100755
index 0000000..203292b
--- /dev/null
+++ b/verifypxe.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+# === Genesis PXE Verifier ===
+# Verifies iPXE script and image accessibility over Tailscale
+
+TAILSCALE_IP="100.113.50.65"
+VM_NAME="$1"
+
+if [[ -z "$VM_NAME" ]]; then
+ echo "Usage: $0 <vm-name>"
+ exit 1
+fi
+
+IPXE_URL="http://100.113.50.65:3000/ipxe/${VM_NAME}.ipxe"
+
+
+echo "πŸ”Ž Checking iPXE script at $IPXE_URL ..."
+if ! curl -fsSL "$IPXE_URL" -o "/tmp/${VM_NAME}.ipxe"; then
+ echo "❌ Failed to fetch iPXE script: $IPXE_URL"
+ exit 2
+fi
+echo "βœ… iPXE script retrieved."
+
+# Extract kernel and initrd lines
+KERNEL_URL=$(grep '^kernel ' "/tmp/${VM_NAME}.ipxe" | awk '{print $2}')
+INITRD_URL=$(grep '^initrd ' "/tmp/${VM_NAME}.ipxe" | awk '{print $2}')
+
+if [[ -z "$KERNEL_URL" || -z "$INITRD_URL" ]]; then
+ echo "❌ Could not parse kernel/initrd URLs from iPXE script."
+ exit 3
+fi
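+
+# For reference, the lines parsed above look like this in a typical iPXE script
+# (illustrative shape only):
+#   kernel http://<host>:3000/images/<vm>/vmlinuz console=ttyS0
+#   initrd http://<host>:3000/images/<vm>/initrd.img
+#   boot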
+
+echo "πŸ” Kernel URL: $KERNEL_URL"
+echo "πŸ” Initrd URL: $INITRD_URL"
+
+echo "πŸ”Ž Verifying kernel URL ..."
+if ! curl -fsI "$KERNEL_URL" >/dev/null; then
+ echo "❌ Kernel file not accessible."
+ exit 4
+fi
+echo "βœ… Kernel accessible."
+
+echo "πŸ”Ž Verifying initrd URL ..."
+if ! curl -fsI "$INITRD_URL" >/dev/null; then
+ echo "❌ Initrd file not accessible."
+ exit 5
+fi
+echo "βœ… Initrd accessible."
+
+echo "πŸŽ‰ PXE verification successful for VM: $VM_NAME"
+echo "πŸš€ Ready to launch boot from $IPXE_URL"
+
+# Optional: Telegram notify (requires telegram-send config)
+if command -v telegram-send &>/dev/null; then
+ telegram-send "βœ… PXE verify passed for *${VM_NAME}*.
+Netboot source: \`${IPXE_URL}\`
+Kernel: \`${KERNEL_URL##*/}\`
+Initrd: \`${INITRD_URL##*/}\`
+Ready to launch via Proxmox." --parse-mode markdown
+fi
+
+exit 0
diff --git a/watchdog.sh b/watchdog.sh
new file mode 100755
index 0000000..9c6848d
--- /dev/null
+++ b/watchdog.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# === CONFIG ===
+WATCH_STRING="find /mnt/raid5 -type d -exec chmod o+X {} \\;" # Adjust if needed
+CHECK_INTERVAL=60 # seconds
+BOT_TOKEN="8178867489:AAH0VjN7VnZSCIWasSz_y97iBLLjPJA751k"
+CHAT_ID="1559582356"
+HOST=$(hostname)
+LOGFILE="$HOME/krang-logs/chmod_watchdog_$(date '+%Y%m%d-%H%M').log"
+mkdir -p "$HOME/krang-logs"
+
+# === FIND TARGET PID ===
+PID=$(pgrep -f "$WATCH_STRING")
+
+if [ -z "$PID" ]; then
+ echo "❌ No matching chmod process found." | tee -a "$LOGFILE"
+ exit 1
+fi
+
+echo "πŸ‘οΈ Watching PID $PID for chmod job on $HOST..." | tee -a "$LOGFILE"
+
+# === MONITOR LOOP ===
+while kill -0 "$PID" 2>/dev/null; do
+ echo "⏳ [$HOST] chmod PID $PID still running..." >> "$LOGFILE"
+ sleep "$CHECK_INTERVAL"
+done
+
+# === COMPLETE ===
+MSG="βœ… [$HOST] chmod finished on /mnt/raid5
+Time: $(date '+%Y-%m-%d %H:%M:%S')
+PID: $PID
+Watchdog confirmed completion."
+
+echo -e "$MSG" | tee -a "$LOGFILE"
+
+curl -s -X POST https://api.telegram.org/bot$BOT_TOKEN/sendMessage \
+ -d chat_id="$CHAT_ID" \
+ -d text="$MSG"
+
diff --git a/watchman.sh b/watchman.sh
new file mode 100755
index 0000000..a07e331
--- /dev/null
+++ b/watchman.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+#set -e
+export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+# === Enable Full Debug Logging ===
+exec >> /home/doc/healthchecks/watchman.log 2>&1
+set -x  # Print each command as it's run
+
+DATE="$(date '+%Y-%m-%d %H:%M:%S')"
+echo "[$DATE] Watchman script executed" >> /var/log/watchman_cron.log
+
+# === Config ===
+PRIMARY_IP="38.102.127.168" # Main TeamTalk server
+BACKUP_IP="172.238.63.162" # Backup TeamTalk server
+CF_ZONE_ID="c5099d42caa2d9763227267c597cb758"
+CF_RECORD_ID="7001484a25f0fe5c323845b6695f7544"
+CF_API_TOKEN="lCz1kH6nBZPJL0EWrNI-xEDwfR0oOLpg05fq6M81"
+THRESHOLD_LATENCY=150
+THRESHOLD_LOSS=5
+BOT_TOKEN="123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11"
+CHAT_ID="987654321"
+DNS_NAME="tt.themediahub.org"
+
+LOG_FILE="/home/doc/healthchecks/watchman.log"
+
+# === Current DNS IP ===
+CURRENT_IP=$(/usr/bin/dig +short "$DNS_NAME" | grep -Eo '([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1)
+echo "[$DATE] Current IP: $CURRENT_IP"
+# === Check Primary Server Health ===
+echo "[$DATE] πŸ”Ž Checking ping to $PRIMARY_IP..."
+PING_OUTPUT=$(/bin/ping -c 4 "$PRIMARY_IP" || echo "Ping failed")
+LATENCY=$(echo "$PING_OUTPUT" | tail -1 | /usr/bin/awk -F '/' '{print $5}')
+echo "[$DATE] Ping output: $PING_OUTPUT"
+LOSS=$(echo "$PING_OUTPUT" | /bin/grep -oP '\d+(?=% packet loss)')
+echo "[$DATE] Parsed latency: $LATENCY, loss: $LOSS"
+echo "[$DATE] Ping output: $PING_OUTPUT"
+echo "[$DATE] Parsed latency: $LATENCY, loss: $LOSS"
+echo "[$DATE] Current DNS IP: $CURRENT_IP"
+
+if [[ -z "$LATENCY" || "$LOSS" -ge "$THRESHOLD_LOSS" || ( -n "$LATENCY" && "$(echo "$LATENCY > $THRESHOLD_LATENCY" | bc)" -eq 1 ) ]]; then
+ if [[ "$CURRENT_IP" != "$BACKUP_IP" ]]; then
+ echo "[$DATE] 🚨 Primary down! Switching DNS to backup IP ($BACKUP_IP)..."
+ MESSAGE="🚨 ALERT: Primary TeamTalk ($PRIMARY_IP) down. Loss: ${LOSS}%, Latency: ${LATENCY}ms. Switching to backup: $BACKUP_IP"
+ curl -v -s -X POST "https://api.telegram.org/bot${BOT_TOKEN}/sendMessage" \
+ -d "chat_id=${CHAT_ID}" -d "text=${MESSAGE}"
+
+ echo "[$DATE] πŸ”„ Sending DNS switch request to Cloudflare..."
+ API_RESPONSE=$(curl -v -s -X PUT "https://api.cloudflare.com/client/v4/zones/${CF_ZONE_ID}/dns_records/${CF_RECORD_ID}" \
+ -H "Authorization: Bearer ${CF_API_TOKEN}" \
+ -H "Content-Type: application/json" \
+ --data "{\"type\":\"A\",\"name\":\"${DNS_NAME}\",\"content\":\"${BACKUP_IP}\",\"ttl\":60,\"proxied\":false}")
+ echo "[$DATE] Cloudflare API response: $API_RESPONSE"
+ echo "[$DATE] βœ… DNS switched to backup."
+ else
+ echo "[$DATE] πŸ”„ Primary down, but already on backup. No DNS change needed."
+ fi
+else
+ if [[ "$CURRENT_IP" != "$PRIMARY_IP" ]]; then
+ echo "[$DATE] βœ… Primary healthy! Switching DNS back to primary IP ($PRIMARY_IP)..."
+ MESSAGE="βœ… Primary TeamTalk ($PRIMARY_IP) back online. Loss: ${LOSS}%, Latency: ${LATENCY}ms. Switching DNS back to primary."
+ curl -v -s -X POST "https://api.telegram.org/bot${BOT_TOKEN}/sendMessage" \
+ -d "chat_id=${CHAT_ID}" -d "text=${MESSAGE}"
+
+ echo "[$DATE] πŸ”„ Sending DNS switch back to Cloudflare..."
+ API_RESPONSE=$(curl -v -s -X PUT "https://api.cloudflare.com/client/v4/zones/${CF_ZONE_ID}/dns_records/${CF_RECORD_ID}" \
+ -H "Authorization: Bearer ${CF_API_TOKEN}" \
+ -H "Content-Type: application/json" \
+ --data "{\"type\":\"A\",\"name\":\"${DNS_NAME}\",\"content\":\"${PRIMARY_IP}\",\"ttl\":60,\"proxied\":false}")
+ echo "[$DATE] Cloudflare API response: $API_RESPONSE"
+ echo "[$DATE] βœ… DNS switched back to primary."
+ else
+ echo "[$DATE] βœ… Primary healthy, already using primary IP. No DNS change needed."
+ fi
+fi
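+
+# Manual spot-check of the live record (sketch; same zone/record IDs, jq assumed):
+#   curl -s -H "Authorization: Bearer $CF_API_TOKEN" \
+#     "https://api.cloudflare.com/client/v4/zones/$CF_ZONE_ID/dns_records/$CF_RECORD_ID" \
+#     | jq -r '.result.content'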
diff --git a/zfs_bootstrap.sh b/zfs_bootstrap.sh
new file mode 100755
index 0000000..c1ed297
--- /dev/null
+++ b/zfs_bootstrap.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# CONFIGURATION
+ORIG_MINIO_PATH="/assets"
+NEW_ZFS_PATH="/zfs/disk1"
+MINIO_BUCKETS=(
+ "assets_azuracast"
+ "assets_archives"
+ "assets_genesisassets"
+ "assets_genesislibrary"
+ "assets_mastodon"
+ "assets_teamtalkdata"
+)
+MINIO_USER="minio-user"
+MINIO_SERVICE="minio"
+
+echo "=== Step 1: Preparing new ZFS path ==="
+mkdir -p "$NEW_ZFS_PATH"
+
+for BUCKET in "${MINIO_BUCKETS[@]}"; do
+ CLEAN_NAME="${BUCKET/assets_/}" # Remove 'assets_' prefix
+ SRC="$ORIG_MINIO_PATH/$BUCKET/"
+ DEST="$NEW_ZFS_PATH/$CLEAN_NAME/"
+
+ echo "=== Step 2: Rsyncing $BUCKET β†’ $CLEAN_NAME ==="
+ rsync -a --info=progress2 "$SRC" "$DEST"
+
+ echo "=== Step 3: Fixing ownership for: $CLEAN_NAME ==="
+ chown -R "$MINIO_USER:$MINIO_USER" "$DEST"
+done
+
+echo "=== Step 4: Update MinIO service (manual step) ==="
+echo "Set ExecStart in minio.service to:"
+echo " /usr/local/bin/minio server $NEW_ZFS_PATH --console-address \":9001\""
+
+echo "=== Step 5: Reload and restart MinIO ==="
+echo "Run:"
+echo " systemctl daemon-reload"
+echo " systemctl restart $MINIO_SERVICE"
+
+echo "=== Step 6: Validate with mc ==="
+echo "Run:"
+echo " mc alias set local http://localhost:9000 genesisadmin MutationXv3!"
+echo " mc ls local/"
+
+echo ""
+echo "βœ… All buckets (including teamtalkdata) are now synced to the ZFS backend."
+echo "To roll back, revert minio.service ExecStart and restart MinIO."