remove gotify and borg integrity check

Yan Lin 2025-10-10 01:24:22 +02:00
parent 4e4294d334
commit 397e97561c
11 changed files with 18 additions and 986 deletions

@@ -1,201 +0,0 @@
# Borg backup integrity check script with notifications
# Usage: borg-integrity-check.sh <repo_url> <check_depth> <last_archives> <enable_notifications> <gotify_url> <gotify_token> <hostname>
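# Example invocation (hypothetical values, for illustration only):
#   borg-integrity-check.sh "ssh://borg@backup-host/./repo" "archives" 5 1 \
#     "https://notify.example.com" "app-token" "myhost"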
set -euo pipefail
# Validate arguments
if [[ $# -ne 7 ]]; then
echo "Usage: $0 <repo_url> <check_depth> <last_archives> <enable_notifications> <gotify_url> <gotify_token> <hostname>"
exit 1
fi
# Get parameters
REPO_URL="$1"
CHECK_DEPTH="$2"
LAST_ARCHIVES="$3"
ENABLE_NOTIFICATIONS="$4"
GOTIFY_URL="$5"
GOTIFY_TOKEN="$6"
HOSTNAME="$7"
# Start time for tracking duration
CHECK_START=$(date +%s)
CHECK_DATE=$(date '+%Y-%m-%d %H:%M:%S')
# Initialize result variables
CHECK_RESULT="SUCCESS"
CHECK_DETAILS=""
ERRORS_FOUND=""
# Function to send notifications
send_notification() {
local priority="$1"
local title="$2"
local message="$3"
if [ "$ENABLE_NOTIFICATIONS" = "1" ] && [ -n "$GOTIFY_URL" ] && [ -n "$GOTIFY_TOKEN" ]; then
/home/yanlin/.config/nix/scripts/gotify-notify.sh \
"$GOTIFY_URL" \
"$GOTIFY_TOKEN" \
"$priority" \
"$title" \
"$message" || echo "Failed to send notification (non-critical)" >&2
fi
}
# Function to run borg check with error handling
run_borg_check() {
local check_args="$1"
local check_type="$2"
local output
local exit_code
echo "Running $check_type check..."
# Run the check and capture output
if output=$(borg check $check_args 2>&1); then
echo "$check_type check completed successfully"
CHECK_DETAILS="${CHECK_DETAILS}$check_type check passed\n"
return 0
else
exit_code=$?
echo "ERROR: $check_type check failed with exit code $exit_code" >&2
echo "Output: $output" >&2
CHECK_RESULT="FAILED"
ERRORS_FOUND="${ERRORS_FOUND}$check_type check failed (exit code: $exit_code)\n"
# Extract specific error details if available
if echo "$output" | grep -q "corrupted"; then
ERRORS_FOUND="${ERRORS_FOUND} - Corruption detected\n"
fi
if echo "$output" | grep -q "missing"; then
ERRORS_FOUND="${ERRORS_FOUND} - Missing data detected\n"
fi
return $exit_code
fi
}
# Main check logic
echo "Starting Borg integrity check for $HOSTNAME at $CHECK_DATE"
echo "Repository: $REPO_URL"
echo "Check depth: $CHECK_DEPTH"
# Repository consistency check (always performed)
if ! run_borg_check "--repository-only" "Repository consistency"; then
# Repository check failure is critical - stop here
CHECK_END=$(date +%s)
CHECK_DURATION=$((CHECK_END - CHECK_START))
send_notification "critical" \
"Borg Check Failed" \
"Repository consistency check failed!\n\nRepository: $REPO_URL\nDuration: ${CHECK_DURATION}s\n\nErrors:\n$ERRORS_FOUND\n\nImmediate attention required!"
exit 1
fi
# Archive metadata check (if depth is archives or data)
if [ "$CHECK_DEPTH" = "archives" ] || [ "$CHECK_DEPTH" = "data" ]; then
if ! run_borg_check "--archives-only" "Archive metadata"; then
# Archive check failure is serious but not necessarily critical
echo "WARNING: Archive metadata check failed, but repository is consistent"
fi
fi
# Full data verification (if depth is data)
if [ "$CHECK_DEPTH" = "data" ]; then
echo "Performing full data verification on last $LAST_ARCHIVES archives..."
# Get the list of archives and select the last N
if ARCHIVE_LIST=$(borg list --short 2>/dev/null | tail -n "$LAST_ARCHIVES"); then
if [ -n "$ARCHIVE_LIST" ]; then
# Build the check command with specific archives.
# Archive names from `borg list --short` are assumed to contain no whitespace,
# since ARCHIVE_ARGS is deliberately word-split again inside run_borg_check.
ARCHIVE_ARGS=""
while IFS= read -r archive; do
ARCHIVE_ARGS="$ARCHIVE_ARGS --glob-archives $archive"
done <<< "$ARCHIVE_LIST"
# Run data verification on selected archives
if ! run_borg_check "$ARCHIVE_ARGS" "Data verification ($LAST_ARCHIVES archives)"; then
echo "WARNING: Data verification failed for some archives"
fi
else
echo "No archives found for data verification"
CHECK_DETAILS="${CHECK_DETAILS}⚠ No archives available for data verification\n"
fi
else
echo "Failed to list archives for data verification"
CHECK_DETAILS="${CHECK_DETAILS}⚠ Could not list archives for data verification\n"
fi
fi
# Calculate total duration
CHECK_END=$(date +%s)
CHECK_DURATION=$((CHECK_END - CHECK_START))
# Format duration for display
if [ $CHECK_DURATION -ge 3600 ]; then
DURATION_STR="$(($CHECK_DURATION / 3600))h $(($CHECK_DURATION % 3600 / 60))m"
elif [ $CHECK_DURATION -ge 60 ]; then
DURATION_STR="$(($CHECK_DURATION / 60))m $(($CHECK_DURATION % 60))s"
else
DURATION_STR="${CHECK_DURATION}s"
fi
# Get repository statistics for the notification
REPO_STATS=""
if REPO_INFO=$(borg info --json 2>/dev/null); then
# Try to extract useful stats (this is a simplified version)
if command -v jq >/dev/null 2>&1; then
TOTAL_SIZE=$(echo "$REPO_INFO" | jq -r '.cache.stats.total_size // 0' 2>/dev/null || echo "0")
TOTAL_CHUNKS=$(echo "$REPO_INFO" | jq -r '.cache.stats.total_chunks // 0' 2>/dev/null || echo "0")
if [ "$TOTAL_SIZE" != "0" ]; then
# Convert bytes to human-readable format
TOTAL_SIZE_MB=$((TOTAL_SIZE / 1024 / 1024))
if [ $TOTAL_SIZE_MB -ge 1024 ]; then
TOTAL_SIZE_GB=$((TOTAL_SIZE_MB / 1024))
REPO_STATS="\n\nRepository Stats:\n• Total size: ${TOTAL_SIZE_GB}GB\n• Total chunks: $TOTAL_CHUNKS"
else
REPO_STATS="\n\nRepository Stats:\n• Total size: ${TOTAL_SIZE_MB}MB\n• Total chunks: $TOTAL_CHUNKS"
fi
fi
fi
fi
# Prepare final message
if [ "$CHECK_RESULT" = "SUCCESS" ]; then
SUCCESS_MSG="Integrity check completed successfully for $HOSTNAME\n\n"
SUCCESS_MSG="${SUCCESS_MSG}Repository: $REPO_URL\n"
SUCCESS_MSG="${SUCCESS_MSG}Check depth: $CHECK_DEPTH\n"
SUCCESS_MSG="${SUCCESS_MSG}Duration: $DURATION_STR\n\n"
SUCCESS_MSG="${SUCCESS_MSG}Results:\n$CHECK_DETAILS"
SUCCESS_MSG="${SUCCESS_MSG}$REPO_STATS"
echo "Integrity check completed successfully"
echo -e "$SUCCESS_MSG"
send_notification "normal" \
"Borg Check Passed" \
"$SUCCESS_MSG"
else
FAILURE_MSG="Integrity check found issues for $HOSTNAME\n\n"
FAILURE_MSG="${FAILURE_MSG}Repository: $REPO_URL\n"
FAILURE_MSG="${FAILURE_MSG}Check depth: $CHECK_DEPTH\n"
FAILURE_MSG="${FAILURE_MSG}Duration: $DURATION_STR\n\n"
FAILURE_MSG="${FAILURE_MSG}Issues found:\n$ERRORS_FOUND\n"
FAILURE_MSG="${FAILURE_MSG}Successful checks:\n$CHECK_DETAILS"
FAILURE_MSG="${FAILURE_MSG}$REPO_STATS"
echo "Integrity check completed with errors"
echo -e "$FAILURE_MSG"
send_notification "high" \
"Borg Check Issues" \
"$FAILURE_MSG"
# Exit with error code to indicate failure
exit 1
fi
echo "Integrity check process completed"

@@ -1,32 +1,14 @@
# Container update script with Gotify notifications
# Container update script
# Updates all podman containers to latest images
set -euo pipefail
# Configuration from environment (set by systemd service)
GOTIFY_URL="${GOTIFY_URL:-}"
GOTIFY_TOKEN="${GOTIFY_TOKEN:-}"
EXCLUDE_CONTAINERS="${EXCLUDE_CONTAINERS:-}"
# Convert excluded containers to array
IFS=',' read -ra EXCLUDED <<< "$EXCLUDE_CONTAINERS"
# Function to send Gotify notification
send_notification() {
local priority="$1"
local title="$2"
local message="$3"
if [[ -n "$GOTIFY_URL" ]] && [[ -n "$GOTIFY_TOKEN" ]]; then
/home/yanlin/.config/nix/scripts/gotify-notify.sh \
"$GOTIFY_URL" \
"$GOTIFY_TOKEN" \
"$priority" \
"$title" \
"$message" 2>&1 || echo "Failed to send notification"
fi
}
# Get all running containers
echo "Getting list of running containers..."
containers=$(podman ps --format "{{.Names}}")
@@ -106,49 +88,29 @@ for container in $containers; do
echo ""
done
# Prepare notification message
notification_lines=()
notification_priority="normal"
# Print summary
echo "=== Update Summary ==="
if [[ ${#updated_containers[@]} -gt 0 ]]; then
notification_lines+=("✅ Updated (${#updated_containers[@]}):")
echo "✅ Updated (${#updated_containers[@]}):"
for container in "${updated_containers[@]}"; do
notification_lines+=("$container")
echo "$container"
done
fi
if [[ ${#failed_containers[@]} -gt 0 ]]; then
notification_priority="high"
notification_lines+=("")
notification_lines+=("❌ Failed (${#failed_containers[@]}):")
echo "❌ Failed (${#failed_containers[@]}):"
for container in "${failed_containers[@]}"; do
notification_lines+=("$container")
echo "$container"
done
fi
if [[ ${#skipped_containers[@]} -gt 0 ]]; then
notification_lines+=("")
notification_lines+=("⏭️ No updates (${#skipped_containers[@]}):")
echo "⏭️ No updates (${#skipped_containers[@]}):"
for container in "${skipped_containers[@]}"; do
notification_lines+=("$container")
echo "$container"
done
fi
# Send notification if there were any updates or failures
if [[ ${#notification_lines[@]} -gt 0 ]]; then
# Build multi-line message similar to borg-client
message=""
for line in "${notification_lines[@]}"; do
if [[ -n "$message" ]]; then
message="${message}\n${line}"
else
message="$line"
fi
done
send_notification "$notification_priority" "Container Update" "$message"
fi
# Exit with error if any containers failed
if [[ ${#failed_containers[@]} -gt 0 ]]; then
echo "ERROR: Some containers failed to update"

@@ -1,283 +0,0 @@
# Simple daily SMART report script - plain text version
# Only checks SMART attributes and sends report via Gotify
# Usage: daily-smart-report.sh <gotify_token>
# Drive list should be passed via SMART_DRIVES environment variable as "device:name" pairs
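# Example invocation (hypothetical values, for illustration only):
#   SMART_DRIVES="/dev/sda:wd-red-4tb;/dev/nvme0n1:samsung-980" \
#     daily-smart-report.sh "gotify-app-token"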
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
GOTIFY_SCRIPT="${SCRIPT_DIR}/gotify-notify.sh"
LOG_FILE="/var/log/daily-smart-report.log"
# Get parameters
GOTIFY_TOKEN="${1:-}"
# Validate parameters
if [[ -z "$GOTIFY_TOKEN" ]]; then
echo "Error: Gotify token not provided"
echo "Usage: $0 <gotify_token>"
echo "Drives should be in SMART_DRIVES environment variable"
exit 1
fi
# Gotify configuration
GOTIFY_URL="https://notify.yanlincs.com"
# Parse drive configurations from environment variable
# SMART_DRIVES format: "device1:name1;device2:name2;..."
declare -A DRIVES=()
if [[ -n "${SMART_DRIVES:-}" ]]; then
IFS=';' read -ra DRIVE_PAIRS <<< "$SMART_DRIVES"
for pair in "${DRIVE_PAIRS[@]}"; do
IFS=':' read -r device name <<< "$pair"
if [[ -n "$device" && -n "$name" ]]; then
DRIVES["$device"]="$name"
fi
done
else
echo "Warning: No drives specified in SMART_DRIVES environment variable"
echo "Format: SMART_DRIVES='device1:name1;device2:name2'"
exit 1
fi
main() {
local report=""
local healthy_drives=0
local total_drives=0
echo "Starting daily SMART report"
report="Daily SMART Report - $(date '+%Y-%m-%d')\n\n"
report+="Drive SMART Status:\n"
# Check each drive
for device in "${!DRIVES[@]}"; do
local device_name="${DRIVES[$device]}"
total_drives=$((total_drives + 1))
echo "Checking $device_name"
# Quick device existence check
if [[ ! -e "$device" ]]; then
report+="[FAIL] $device_name: Device not found\n"
continue
fi
# Check if it's NVMe (for attribute parsing differences)
local is_nvme=false
if [[ "$device" == *"nvme"* ]]; then
is_nvme=true
fi
# Get SMART health
local health="UNKNOWN"
local smartctl_opts=""
if [[ "$is_nvme" == "true" ]]; then
smartctl_opts="-d nvme"
fi
if health=$(smartctl $smartctl_opts -H "$device" 2>/dev/null | grep -o "PASSED\|FAILED" | head -1); then
echo " Health: $health"
else
health="UNKNOWN"
echo " Health: $health"
fi
# Get enhanced SMART data
local temp="N/A"
local power_hours="N/A"
local wear_info=""
local data_info=""
local error_info=""
if [[ "$health" == "PASSED" ]]; then
local smart_data
smart_data=$(smartctl $smartctl_opts -A "$device" 2>/dev/null) || true # don't abort under set -e if smartctl exits non-zero
if [[ "$is_nvme" == "true" ]]; then
# NVMe attributes (different format)
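# The patterns below assume typical `smartctl -A -d nvme` output lines, e.g.:
#   Temperature:                        36 Celsius
#   Power On Hours:                     1,234
#   Percentage Used:                    3%
#   Data Units Written:                 9,876,543 [5.05 TB]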
temp=$(echo "$smart_data" | awk '/^Temperature:/ {print $2}' | head -1)
if [[ -n "$temp" && "$temp" =~ ^[0-9]+$ ]]; then
temp="${temp}C"
else
temp="N/A"
fi
power_hours=$(echo "$smart_data" | awk '/^Power On Hours:/ {print $4}' | sed 's/,//g')
local percentage_used
percentage_used=$(echo "$smart_data" | awk '/^Percentage Used:/ {print $3}' | tr -d '%')
if [[ -n "$percentage_used" ]]; then
wear_info="Wear: ${percentage_used}%"
fi
local data_read data_written
data_read=$(echo "$smart_data" | awk '/^Data Units Read:/ {match($0, /\[([^\]]+)\]/, arr); print arr[1]}')
data_written=$(echo "$smart_data" | awk '/^Data Units Written:/ {match($0, /\[([^\]]+)\]/, arr); print arr[1]}')
if [[ -n "$data_read" && -n "$data_written" ]]; then
data_info="Data: R:${data_read} W:${data_written}"
fi
local unsafe_shutdowns media_errors
unsafe_shutdowns=$(echo "$smart_data" | awk '/^Unsafe Shutdowns:/ {print $3}')
media_errors=$(echo "$smart_data" | awk '/^Media and Data Integrity Errors:/ {print $6}')
local error_parts=()
if [[ -n "$unsafe_shutdowns" && "$unsafe_shutdowns" -gt 0 ]]; then
error_parts+=("UnsafeShutdowns:$unsafe_shutdowns")
fi
if [[ -n "$media_errors" && "$media_errors" -gt 0 ]]; then
error_parts+=("MediaErrors:$media_errors")
fi
if [[ ${#error_parts[@]} -gt 0 ]]; then
error_info=$(IFS=' '; echo "${error_parts[*]}")
fi
else
# SATA/SAS drives - try to get all available attributes
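# The patterns below assume the standard `smartctl -A` attribute table, e.g.:
#   ID# ATTRIBUTE_NAME        FLAG   VALUE WORST THRESH TYPE    UPDATED WHEN_FAILED RAW_VALUE
#   194 Temperature_Celsius   0x0022 062   045   000    Old_age Always  -           38
# i.e. $4 is the normalized VALUE and $10 is the RAW_VALUE.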
# Temperature
temp=$(echo "$smart_data" | awk '/Temperature_Celsius/ {print $10}' | head -1)
if [[ -n "$temp" && "$temp" =~ ^[0-9]+$ ]]; then
temp="${temp}C"
else
temp="N/A"
fi
# Power on hours
power_hours=$(echo "$smart_data" | awk '/Power_On_Hours/ {print $10}' | head -1)
# Wear indicators (for SSDs)
local wear_level media_wearout percentage_used
wear_level=$(echo "$smart_data" | awk '/Wear_Leveling_Count/ {print $4}' | head -1)
media_wearout=$(echo "$smart_data" | awk '/Media_Wearout_Indicator/ {print $4}' | head -1)
if [[ -n "$wear_level" ]]; then
percentage_used=$((100 - wear_level))
wear_info="Wear: ${percentage_used}%"
elif [[ -n "$media_wearout" ]]; then
percentage_used=$((100 - media_wearout))
wear_info="Wear: ${percentage_used}%"
fi
# Data read/written
local lbas_written lbas_read data_written data_read
lbas_written=$(echo "$smart_data" | awk '/Total_LBAs_Written/ {print $10}' | head -1)
lbas_read=$(echo "$smart_data" | awk '/Total_LBAs_Read/ {print $10}' | head -1)
if [[ -n "$lbas_written" && -n "$lbas_read" ]]; then
# Convert LBAs to GB (1 LBA = 512 bytes)
local gb_written_int gb_read_int
gb_written_int=$((lbas_written / 2097152)) # LBAs * 512 bytes / 1 GiB (2097152 = 1 GiB / 512 B)
gb_read_int=$((lbas_read / 2097152))
# Format with appropriate units
if [[ $gb_written_int -gt 1000 ]]; then
local tb_written_int=$((gb_written_int / 1024))
local tb_written_dec=$(( (gb_written_int % 1024) * 10 / 1024 ))
data_written="${tb_written_int}.${tb_written_dec} TB"
else
data_written="${gb_written_int} GB"
fi
if [[ $gb_read_int -gt 1000 ]]; then
local tb_read_int=$((gb_read_int / 1024))
local tb_read_dec=$(( (gb_read_int % 1024) * 10 / 1024 ))
data_read="${tb_read_int}.${tb_read_dec} TB"
else
data_read="${gb_read_int} GB"
fi
data_info="Data: R:${data_read} W:${data_written}"
fi
# Check for various error indicators
local power_cycles reallocated pending_sectors offline_uncorrectable
power_cycles=$(echo "$smart_data" | awk '/Power_Cycle_Count/ {print $10}' | head -1)
reallocated=$(echo "$smart_data" | awk '/Reallocated_Sector_Ct/ {print $10}' | head -1)
pending_sectors=$(echo "$smart_data" | awk '/Current_Pending_Sector/ {print $10}' | head -1)
offline_uncorrectable=$(echo "$smart_data" | awk '/Offline_Uncorrectable/ {print $10}' | head -1)
local error_parts=()
if [[ -n "$power_cycles" && "$power_cycles" -gt 0 ]]; then
error_parts+=("PowerCycles:$power_cycles")
fi
if [[ -n "$reallocated" && "$reallocated" -gt 0 ]]; then
error_parts+=("Reallocated:$reallocated")
fi
if [[ -n "$pending_sectors" && "$pending_sectors" -gt 0 ]]; then
error_parts+=("PendingSectors:$pending_sectors")
fi
if [[ -n "$offline_uncorrectable" && "$offline_uncorrectable" -gt 0 ]]; then
error_parts+=("OfflineUncorrectable:$offline_uncorrectable")
fi
if [[ ${#error_parts[@]} -gt 0 ]]; then
error_info=$(IFS=' '; echo "${error_parts[*]}")
fi
fi
echo " Temperature: $temp"
echo " Power Hours: $power_hours"
[[ -n "$wear_info" ]] && echo " $wear_info"
[[ -n "$data_info" ]] && echo " $data_info"
[[ -n "$error_info" ]] && echo " $error_info"
fi
# Format output
if [[ "$health" == "PASSED" ]]; then
report+="[OK] $device_name: $health\\n"
report+=" Temp: $temp"
if [[ "$power_hours" != "N/A" ]]; then
report+=", PowerOn: ${power_hours}h"
fi
if [[ -n "$wear_info" ]]; then
report+=", $wear_info"
fi
report+="\\n"
if [[ -n "$data_info" ]]; then
report+=" $data_info\\n"
fi
if [[ -n "$error_info" ]]; then
report+=" ⚠️ $error_info\\n"
fi
healthy_drives=$((healthy_drives + 1))
else
report+="[FAIL] $device_name: $health\\n"
if [[ "$temp" != "N/A" ]]; then
report+=" Temp: $temp\\n"
fi
fi
done
# Add summary
report+="\nSummary:\n"
if [[ $healthy_drives -eq $total_drives ]]; then
report+="Status: All $total_drives drives healthy\n"
report+="Next check: $(date -d 'tomorrow 08:00' '+%Y-%m-%d 08:00')"
echo "Result: All drives healthy ($healthy_drives/$total_drives)"
# Send notification
if [[ -x "$GOTIFY_SCRIPT" ]]; then
"$GOTIFY_SCRIPT" "$GOTIFY_URL" "$GOTIFY_TOKEN" "normal" "Daily SMART Report" "$report"
fi
else
local issues=$((total_drives - healthy_drives))
report+="Status: $issues of $total_drives drives have issues"
echo "Result: Issues detected ($healthy_drives/$total_drives drives healthy)"
# Send high priority notification for issues
if [[ -x "$GOTIFY_SCRIPT" ]]; then
"$GOTIFY_SCRIPT" "$GOTIFY_URL" "$GOTIFY_TOKEN" "high" "Daily SMART Report - Issues Detected" "$report"
fi
fi
# Simple logging
echo "[$(date '+%Y-%m-%d %H:%M:%S')] Daily SMART report: $healthy_drives/$total_drives drives healthy" >> "$LOG_FILE" 2>/dev/null || true
echo "Daily SMART report completed"
}
main "$@"

@@ -1,72 +0,0 @@
# Gotify notification script for disk health monitoring
# Usage: gotify-notify.sh <url> <token> <priority> <title> <message>
set -euo pipefail
# Validate arguments
if [[ $# -ne 5 ]]; then
echo "Usage: $0 <url> <token> <priority> <title> <message>"
echo "Example: $0 'https://notify.yanlincs.com' 'token123' 'high' 'Disk Alert' 'Drive temperature critical'"
exit 1
fi
# Get parameters
GOTIFY_URL="$1"
GOTIFY_TOKEN="$2"
priority="$3"
title="$4"
message="$5"
MAX_RETRIES=3
RETRY_DELAY=5
# Priority mapping: 1=low, 5=normal, 8=high, 10=critical
declare -A PRIORITY_MAP=(
["low"]="1"
["normal"]="5"
["high"]="8"
["critical"]="10"
)
send_notification() {
local priority="$1"
local title="$2"
local message="$3"
local attempt=1
# Map priority to numeric value
local numeric_priority="${PRIORITY_MAP[$priority]:-5}"
while [ $attempt -le $MAX_RETRIES ]; do
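# Note: title and message are interpolated into the JSON payload as-is, so they are
# assumed not to contain unescaped double quotes or literal newlines.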
if curl -s -o /dev/null -w "%{http_code}" \
-X POST "${GOTIFY_URL}/message" \
-H "X-Gotify-Key: ${GOTIFY_TOKEN}" \
-H "Content-Type: application/json" \
-d "{
\"title\": \"${title}\",
\"message\": \"${message}\",
\"priority\": ${numeric_priority}
}" | grep -q "200"; then
echo "Notification sent successfully (attempt $attempt)"
return 0
else
echo "Failed to send notification (attempt $attempt/$MAX_RETRIES)"
if [ $attempt -lt $MAX_RETRIES ]; then
sleep $RETRY_DELAY
fi
((attempt++))
fi
done
echo "ERROR: Failed to send notification after $MAX_RETRIES attempts" >&2
return 1
}
# Validate priority
if [[ ! ${PRIORITY_MAP[$priority]+_} ]]; then
echo "Error: Invalid priority '$priority'. Use: low, normal, high, critical"
exit 1
fi
# Send notification
send_notification "$priority" "$title" "$message"