#!/bin/bash
#
# RWA Mining Ecosystem 2.0 - Deployment & Management Script
# ==========================================================
#
# This script manages the Mining 2.0 ecosystem independently from the 1.0 system.
# The 2.0 system is completely isolated and can be reset at any time without
# affecting the 1.0 system.
#
# Usage:
#   ./deploy-mining.sh up [service]                   # Start all or a specific service
#   ./deploy-mining.sh down [service]                 # Stop all or a specific service
#   ./deploy-mining.sh restart [service]              # Restart all or a specific service
#   ./deploy-mining.sh status                         # Show 2.0 service status
#   ./deploy-mining.sh logs <service> [lines]         # View logs for a specific service
#   ./deploy-mining.sh build [service] [--no-cache]   # Build all or a specific service
#   ./deploy-mining.sh rebuild [service]              # Rebuild with --no-cache and restart
#
# Database Management:
#   ./deploy-mining.sh db-create [service]            # Create 2.0 databases
#   ./deploy-mining.sh db-migrate [service]           # Run Prisma migrations
#   ./deploy-mining.sh db-reset [service]             # Drop and recreate 2.0 databases (DANGEROUS!)
#   ./deploy-mining.sh db-status                      # Show database status
#
# CDC & Sync:
#   ./deploy-mining.sh sync-reset                     # Reset CDC consumer offsets to the beginning
#   ./deploy-mining.sh sync-status                    # Show CDC consumer group status
#
# Full Reset (for development/testing):
#   ./deploy-mining.sh full-reset                     # Complete reset: stop services, drop DBs, recreate, resync
#
# Health & Monitoring:
#   ./deploy-mining.sh health                         # Check health of all 2.0 services
#   ./deploy-mining.sh stats                          # Show system statistics
#
# Service Aliases:
#   contrib, contribution -> contribution-service
#   mining                -> mining-service
#   trading               -> trading-service
#   admin                 -> mining-admin-service
#   auth                  -> auth-service
#   wallet                -> mining-wallet-service
#
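# Typical first-time bring-up, shown as an illustrative sequence of the
# commands documented above (the order is a suggestion, not a requirement):
#
#   ./deploy-mining.sh db-create      # create the 2.0 databases
#   ./deploy-mining.sh db-migrate     # apply the Prisma schemas
#   ./deploy-mining.sh build          # build all 2.0 service images
#   ./deploy-mining.sh up             # start all 2.0 services
#   ./deploy-mining.sh health         # verify databases, services, Kafka, Redis
#
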
set -e

# ===========================================================================
# Configuration
# ===========================================================================
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_FILE="$SCRIPT_DIR/.env"
COMPOSE_FILE="$SCRIPT_DIR/docker-compose.2.0.yml"

# 2.0 Services
MINING_SERVICES=(
    "contribution-service"
    "mining-service"
    "trading-service"
    "mining-admin-service"
    "auth-service"
    "mining-wallet-service"
)

# Service Aliases
declare -A SERVICE_ALIASES=(
    ["contrib"]="contribution-service"
    ["contribution"]="contribution-service"
    ["mining"]="mining-service"
    ["trading"]="trading-service"
    ["admin"]="mining-admin-service"
    ["auth"]="auth-service"
    ["wallet"]="mining-wallet-service"
)

# 2.0 Databases
MINING_DATABASES=(
    "rwa_contribution"
    "rwa_mining"
    "rwa_trading"
    "rwa_mining_admin"
    "rwa_auth"
    "rwa_mining_wallet"
)

# Service to Database mapping
declare -A SERVICE_DB=(
    ["contribution-service"]="rwa_contribution"
    ["mining-service"]="rwa_mining"
    ["trading-service"]="rwa_trading"
    ["mining-admin-service"]="rwa_mining_admin"
    ["auth-service"]="rwa_auth"
    ["mining-wallet-service"]="rwa_mining_wallet"
)

# 2.0 Ports
declare -A SERVICE_PORTS=(
    ["contribution-service"]="3020"
    ["mining-service"]="3021"
    ["trading-service"]="3022"
    ["mining-admin-service"]="3023"
    ["auth-service"]="3024"
    ["mining-wallet-service"]="3025"
)

# CDC Consumer Group
CDC_CONSUMER_GROUP="contribution-service-cdc-group"

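# To inspect this group by hand (illustrative; assumes the standard Kafka CLI
# tooling is available either locally or inside the Kafka container, default
# container name "kafka"):
#   kafka-consumer-groups.sh --bootstrap-server localhost:9092 --list
#   docker exec kafka kafka-consumer-groups.sh --bootstrap-server localhost:9092 \
#       --group contribution-service-cdc-group --describe
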
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
MAGENTA='\033[0;35m'
NC='\033[0m'
BOLD='\033[1m'

# ===========================================================================
# Logging Functions
# ===========================================================================
log_info()    { echo -e "${GREEN}[INFO]${NC} $1"; }
log_warn()    { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error()   { echo -e "${RED}[ERROR]${NC} $1"; }
log_step()    { echo -e "${BLUE}[STEP]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }

print_header() {
    echo ""
    echo -e "${CYAN}╔════════════════════════════════════════════════════════════╗${NC}"
    echo -e "${CYAN}║${NC}        ${BOLD}RWA Mining Ecosystem 2.0 - Management Script${NC}        ${CYAN}║${NC}"
    echo -e "${CYAN}╚════════════════════════════════════════════════════════════╝${NC}"
    echo ""
}

print_section() {
    echo ""
    echo -e "${MAGENTA}━━━ $1 ━━━${NC}"
    echo ""
}

# ===========================================================================
# Helper Functions
# ===========================================================================
resolve_service_name() {
    local input="$1"

    # Check if it's an alias
    if [ -n "${SERVICE_ALIASES[$input]:-}" ]; then
        echo "${SERVICE_ALIASES[$input]}"
        return 0
    fi

    # Check if it's a valid service name
    for service in "${MINING_SERVICES[@]}"; do
        if [ "$service" = "$input" ]; then
            echo "$service"
            return 0
        fi
    done

    # Not found
    return 1
}

validate_service() {
    local service="$1"
    for s in "${MINING_SERVICES[@]}"; do
        if [ "$s" = "$service" ]; then
            return 0
        fi
    done
    return 1
}

get_services_to_process() {
    local input="$1"

    if [ -z "$input" ]; then
        # Return all services
        echo "${MINING_SERVICES[@]}"
    else
        # Resolve and return single service
        local resolved
        resolved=$(resolve_service_name "$input") || {
            # Send the error and help text to stderr so it is not captured by
            # callers that use command substitution (services=$(get_services_to_process ...)).
            {
                log_error "Unknown service: $input"
                echo ""
                echo "Available services:"
                for service in "${MINING_SERVICES[@]}"; do
                    echo "  - $service (port ${SERVICE_PORTS[$service]})"
                done
                echo ""
                echo "Aliases:"
                echo "  - contrib, contribution -> contribution-service"
                echo "  - mining  -> mining-service"
                echo "  - trading -> trading-service"
                echo "  - admin   -> mining-admin-service"
                echo "  - auth    -> auth-service"
                echo "  - wallet  -> mining-wallet-service"
            } >&2
            exit 1
        }
        echo "$resolved"
    fi
}
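
# Callers use these helpers via command substitution, e.g. (illustrative):
#   resolve_service_name "contrib"               # prints "contribution-service"
#   services=$(get_services_to_process "")       # expands to all 2.0 services
#   services=$(get_services_to_process mining)   # expands to "mining-service"
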
# ===========================================================================
# Environment Loading
# ===========================================================================
load_env() {
    if [ -f "$ENV_FILE" ]; then
        set -a
        source "$ENV_FILE"
        set +a
    else
        log_warn "No .env file found, using defaults"
    fi

    # Set defaults
    POSTGRES_HOST="${POSTGRES_HOST:-localhost}"
    POSTGRES_PORT="${POSTGRES_PORT:-5432}"
    POSTGRES_USER="${POSTGRES_USER:-postgres}"
    POSTGRES_PASSWORD="${POSTGRES_PASSWORD:-postgres}"
    KAFKA_BROKERS="${KAFKA_BROKERS:-localhost:9092}"
    REDIS_HOST="${REDIS_HOST:-localhost}"
    REDIS_PORT="${REDIS_PORT:-6379}"

    # Docker container names
    POSTGRES_CONTAINER="${POSTGRES_CONTAINER:-postgres}"
    KAFKA_CONTAINER="${KAFKA_CONTAINER:-kafka}"
}
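
# Minimal example .env covering the variables read above (values shown are the
# illustrative defaults, not production credentials):
#
#   POSTGRES_HOST=localhost
#   POSTGRES_PORT=5432
#   POSTGRES_USER=postgres
#   POSTGRES_PASSWORD=postgres
#   KAFKA_BROKERS=localhost:9092
#   REDIS_HOST=localhost
#   REDIS_PORT=6379
#   POSTGRES_CONTAINER=postgres
#   KAFKA_CONTAINER=kafka
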
# ===========================================================================
# Helper: Execute psql command (auto-detect Docker or local)
# ===========================================================================
run_psql() {
    local db="$1"
    local sql="$2"

    # Try docker exec first (for production Docker environment)
    if docker ps --format '{{.Names}}' 2>/dev/null | grep -q "^${POSTGRES_CONTAINER}$"; then
        docker exec -e PGPASSWORD="$POSTGRES_PASSWORD" "$POSTGRES_CONTAINER" \
            psql -h localhost -U "$POSTGRES_USER" -d "$db" -c "$sql" 2>/dev/null
        return $?
    fi

    # Fall back to local psql
    PGPASSWORD="$POSTGRES_PASSWORD" psql -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -U "$POSTGRES_USER" -d "$db" -c "$sql" 2>/dev/null
    return $?
}
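
# Usage sketch (illustrative): the first argument is the database, the second
# is the SQL statement; the exit status is whatever psql returned.
#   run_psql "postgres" "SELECT 1;"                      # connectivity check
#   run_psql "postgres" "CREATE DATABASE rwa_mining;"    # same call db_create makes
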
# ===========================================================================
# Database Functions
# ===========================================================================
db_create() {
    local target_service="$1"
    print_section "Creating 2.0 Databases"

    local dbs_to_create=()
    if [ -n "$target_service" ]; then
        dbs_to_create=("${SERVICE_DB[$target_service]}")
    else
        dbs_to_create=("${MINING_DATABASES[@]}")
    fi

    for db in "${dbs_to_create[@]}"; do
        log_step "Creating database: $db"
        if run_psql "postgres" "CREATE DATABASE $db;"; then
            log_success "Database $db created"
        else
            log_warn "Database $db already exists or creation failed"
        fi
    done

    log_success "Database creation completed"
}

db_drop() {
    local target_service="$1"
    print_section "Dropping 2.0 Databases"

    local dbs_to_drop=()
    if [ -n "$target_service" ]; then
        dbs_to_drop=("${SERVICE_DB[$target_service]}")
    else
        dbs_to_drop=("${MINING_DATABASES[@]}")
    fi

    for db in "${dbs_to_drop[@]}"; do
        log_step "Dropping database: $db"
        if run_psql "postgres" "DROP DATABASE IF EXISTS $db WITH (FORCE);"; then
            log_success "Database $db dropped"
        else
            log_warn "Failed to drop database $db"
        fi
    done

    log_success "Database drop completed"
}

db_migrate() {
    local target_service="$1"
    print_section "Running Prisma Migrations"

    local services_to_migrate=()
    if [ -n "$target_service" ]; then
        services_to_migrate=("$target_service")
    else
        services_to_migrate=("${MINING_SERVICES[@]}")
    fi

    for service in "${services_to_migrate[@]}"; do
        service_dir="$SCRIPT_DIR/$service"
        if [ -d "$service_dir/prisma" ]; then
            log_step "Migrating: $service"

            # Check if running in Docker environment (container exists)
            local container_name="rwa-${service}"
            if docker ps -a --format '{{.Names}}' 2>/dev/null | grep -q "^${container_name}$"; then
                # Run migration inside the container
                log_info "Running migration in container: $container_name"
                docker start "$container_name" 2>/dev/null || true
                sleep 2
                docker exec "$container_name" npx prisma migrate deploy 2>/dev/null || \
                docker exec "$container_name" npx prisma db push --accept-data-loss 2>/dev/null || {
                    log_warn "Container migration failed, trying to build and run temporary container..."
                    # Build and run a temporary container for migration
                    docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" run --rm "$service" npx prisma migrate deploy 2>/dev/null || \
                    docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" run --rm "$service" npx prisma db push --accept-data-loss 2>/dev/null || {
                        log_warn "Migration failed for $service"
                    }
                }
            elif command -v npx &>/dev/null; then
                # Local development: use npx directly
                cd "$service_dir"
                npx prisma migrate deploy 2>/dev/null || npx prisma db push --accept-data-loss
                cd "$SCRIPT_DIR"
            else
                # No npx and no container - try docker compose run
                log_info "No local npx, using docker compose run for migration"
                docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" run --rm "$service" npx prisma migrate deploy 2>/dev/null || \
                docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" run --rm "$service" npx prisma db push --accept-data-loss 2>/dev/null || {
                    log_warn "Migration failed for $service"
                }
            fi
        fi
    done

    log_success "Migrations completed"
}
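
# Manual equivalent for a single service, mirroring the docker compose fallback
# used above (illustrative; assumes the compose service name matches the entry
# in MINING_SERVICES):
#   docker compose -f docker-compose.2.0.yml --env-file .env \
#       run --rm mining-service npx prisma migrate deploy
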
db_status() {
    print_section "2.0 Database Status"

    echo -e "${BOLD}Database${NC}\t\t${BOLD}Status${NC}\t\t${BOLD}Tables${NC}"
    echo "────────────────────────────────────────────────────"

    for db in "${MINING_DATABASES[@]}"; do
        if run_psql "$db" "SELECT 1" &>/dev/null; then
            # Get the table count directly (run_psql is not used here because the
            # count has to be captured as a bare value via psql -t)
            local table_count
            if docker ps --format '{{.Names}}' 2>/dev/null | grep -q "^${POSTGRES_CONTAINER}$"; then
                table_count=$(docker exec -e PGPASSWORD="$POSTGRES_PASSWORD" "$POSTGRES_CONTAINER" \
                    psql -h localhost -U "$POSTGRES_USER" -d "$db" -t -c "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public';" 2>/dev/null | tr -d ' ')
            else
                table_count=$(PGPASSWORD="$POSTGRES_PASSWORD" psql -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -U "$POSTGRES_USER" -d "$db" -t -c "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public';" 2>/dev/null | tr -d ' ')
            fi
            echo -e "${GREEN}$db${NC}\t${GREEN}UP${NC}\t\t$table_count tables"
        else
            echo -e "${RED}$db${NC}\t\t${RED}DOWN${NC}\t\t-"
        fi
    done
}

db_reset() {
    local target_service="$1"
    print_section "Resetting 2.0 Databases"

    local dbs_to_reset=()
    if [ -n "$target_service" ]; then
        dbs_to_reset=("${SERVICE_DB[$target_service]}")
        echo -e "${RED}${BOLD}WARNING: This will DELETE data for $target_service!${NC}"
    else
        dbs_to_reset=("${MINING_DATABASES[@]}")
        echo -e "${RED}${BOLD}WARNING: This will DELETE ALL 2.0 DATA!${NC}"
    fi

    echo "Affected databases:"
    for db in "${dbs_to_reset[@]}"; do
        echo "  - $db"
    done
    echo ""
    read -p "Are you sure? Type 'yes' to confirm: " confirm

    if [ "$confirm" != "yes" ]; then
        log_warn "Aborted"
        return 1
    fi

    db_drop "$target_service"
    db_create "$target_service"
    db_migrate "$target_service"

    log_success "Database reset completed"
}

# ===========================================================================
# Single Service Functions
# ===========================================================================
service_start() {
    local service="$1"
    local port="${SERVICE_PORTS[$service]}"

    log_step "Starting: $service (port $port)"
    docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" up -d "$service"
    log_success "$service started"
}

service_stop() {
    local service="$1"

    log_step "Stopping: $service"
    docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" stop "$service"
    log_success "$service stopped"
}

service_restart() {
    local service="$1"
    log_step "Restarting: $service"
    docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" restart "$service"
    log_success "$service restarted"
}

service_build() {
    local service="$1"
    local no_cache="$2"

    log_step "Building: $service"

    # Use docker compose to build
    if [ "$no_cache" = "--no-cache" ] || [ "$no_cache" = "true" ]; then
        log_info "Building Docker image (no cache)..."
        docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" build --no-cache "$service"
    else
        log_info "Building Docker image..."
        docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" build "$service"
    fi

    log_success "$service built successfully"
}

service_rebuild() {
    local service="$1"
    local no_cache="$2"

    # 1. Build the service
    service_build "$service" "$no_cache"

    # 2. Stop the old service
    log_info "Stopping old $service..."
    service_stop "$service"

    # 3. Start the new service
    log_info "Starting new $service..."
    service_start "$service"

    log_success "$service rebuilt and restarted successfully"
}
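
# Illustrative invocations that reach service_build/service_rebuild via main():
#   ./deploy-mining.sh build trading --no-cache   # one service, clean image build
#   ./deploy-mining.sh rebuild trading            # build --no-cache, stop, start
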
# ===========================================================================
# Batch Service Functions
# ===========================================================================
services_up() {
    local target="$1"
    print_section "Starting 2.0 Services"

    local services
    services=$(get_services_to_process "$target")
    [ -z "$services" ] && exit 1

    for service in $services; do
        service_start "$service"
    done

    log_success "Service startup completed"
}

services_down() {
    local target="$1"
    print_section "Stopping 2.0 Services"

    local services
    services=$(get_services_to_process "$target")
    [ -z "$services" ] && exit 1

    for service in $services; do
        service_stop "$service"
    done

    log_success "Services stopped"
}

services_restart() {
    local target="$1"
    print_section "Restarting 2.0 Services"

    local services
    services=$(get_services_to_process "$target")
    [ -z "$services" ] && exit 1

    for service in $services; do
        service_restart "$service"
    done

    log_success "Services restarted"
}

services_build() {
    local target="$1"
    local no_cache="$2"
    print_section "Building 2.0 Services"

    local services
    services=$(get_services_to_process "$target")
    [ -z "$services" ] && exit 1

    for service in $services; do
        service_build "$service" "$no_cache"
    done

    log_success "Build completed"
}

services_rebuild() {
    local target="$1"
    print_section "Rebuilding 2.0 Services"

    local services
    services=$(get_services_to_process "$target")
    [ -z "$services" ] && exit 1

    for service in $services; do
        service_rebuild "$service" "--no-cache"
    done

    log_success "Rebuild completed"
}

services_status() {
    print_section "2.0 Service Status"

    echo -e "${BOLD}Service${NC}\t\t\t${BOLD}Port${NC}\t${BOLD}Status${NC}\t\t${BOLD}Health${NC}\t\t${BOLD}PID${NC}"
    echo "────────────────────────────────────────────────────────────────────────"

    for service in "${MINING_SERVICES[@]}"; do
        port="${SERVICE_PORTS[$service]}"
        pid="-"

        # Get PID
        if [ -f "/tmp/$service.pid" ]; then
            pid=$(cat "/tmp/$service.pid" 2>/dev/null || echo "-")
        fi

        # Check if port is listening
        if nc -z localhost "$port" 2>/dev/null; then
            status="${GREEN}RUNNING${NC}"

            # Get PID from port if not in file
            if [ "$pid" = "-" ]; then
                pid=$(lsof -t -i:"$port" 2>/dev/null | head -1 || echo "-")
            fi

            # Check health endpoint
            health_response=$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:$port/health" 2>/dev/null || echo "000")
            if [ "$health_response" = "200" ]; then
                health="${GREEN}HEALTHY${NC}"
            else
                health="${YELLOW}UNKNOWN${NC}"
            fi
        else
            status="${RED}STOPPED${NC}"
            health="${RED}-${NC}"
            pid="-"
        fi

        echo -e "$service\t$port\t$status\t$health\t\t$pid"
    done
}

services_logs() {
    local service="$1"
    local lines="${2:-100}"

    if [ -z "$service" ]; then
        log_error "Please specify a service name"
        echo ""
        echo "Usage: $0 logs <service> [lines]"
        echo ""
        echo "Available services:"
        for svc in "${MINING_SERVICES[@]}"; do
            echo "  - $svc"
        done
        exit 1
    fi

    local resolved
    resolved=$(resolve_service_name "$service") || {
        log_error "Unknown service: $service"
        exit 1
    }

    docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" logs -f --tail="$lines" "$resolved"
}

# ===========================================================================
# CDC / Sync Functions
# ===========================================================================
sync_reset() {
    print_section "Resetting CDC Consumer Offsets"

    echo -e "${YELLOW}This will reset the CDC consumer to read from the beginning.${NC}"
    echo "Consumer Group: $CDC_CONSUMER_GROUP"
    echo ""
    read -p "Continue? (y/n): " confirm

    if [ "$confirm" != "y" ]; then
        log_warn "Aborted"
        return 1
    fi

    # Stop contribution-service first
    log_step "Stopping contribution-service"
    service_stop "contribution-service"

    # Reset offsets
    log_step "Resetting consumer group offsets"
    kafka-consumer-groups.sh --bootstrap-server "$KAFKA_BROKERS" \
        --group "$CDC_CONSUMER_GROUP" \
        --reset-offsets \
        --to-earliest \
        --all-topics \
        --execute 2>/dev/null || {
        # Try inside the Kafka container (no TTY needed, so plain docker exec
        # also works when the script runs non-interactively)
        docker exec "$KAFKA_CONTAINER" kafka-consumer-groups.sh --bootstrap-server localhost:9092 \
            --group "$CDC_CONSUMER_GROUP" \
            --reset-offsets \
            --to-earliest \
            --all-topics \
            --execute 2>/dev/null || log_warn "Could not reset offsets automatically"
    }

    log_success "CDC consumer offsets reset to beginning"
    log_info "Start contribution-service to begin syncing from the beginning"
}

sync_status() {
    print_section "CDC Sync Status"

    echo -e "${BOLD}Consumer Group:${NC} $CDC_CONSUMER_GROUP"
    echo ""

    kafka-consumer-groups.sh --bootstrap-server "$KAFKA_BROKERS" \
        --group "$CDC_CONSUMER_GROUP" \
        --describe 2>/dev/null || {
        docker exec "$KAFKA_CONTAINER" kafka-consumer-groups.sh --bootstrap-server localhost:9092 \
            --group "$CDC_CONSUMER_GROUP" \
            --describe 2>/dev/null || log_warn "Could not get consumer group status"
    }
}

# ===========================================================================
# Full Reset Function
# ===========================================================================
full_reset() {
    print_section "Full 2.0 System Reset"

    echo -e "${RED}${BOLD}╔════════════════════════════════════════════════════════════╗${NC}"
    echo -e "${RED}${BOLD}║  WARNING: This will completely reset the 2.0 system!       ║${NC}"
    echo -e "${RED}${BOLD}║                                                            ║${NC}"
    echo -e "${RED}${BOLD}║  - Stop all 2.0 services                                   ║${NC}"
    echo -e "${RED}${BOLD}║  - Drop all 2.0 databases                                  ║${NC}"
    echo -e "${RED}${BOLD}║  - Recreate databases                                      ║${NC}"
    echo -e "${RED}${BOLD}║  - Run migrations                                          ║${NC}"
    echo -e "${RED}${BOLD}║  - Reset CDC consumer offsets                              ║${NC}"
    echo -e "${RED}${BOLD}║  - Restart services (will sync from 1.0)                   ║${NC}"
    echo -e "${RED}${BOLD}║                                                            ║${NC}"
    echo -e "${RED}${BOLD}║  This will NOT affect the 1.0 system in any way.           ║${NC}"
    echo -e "${RED}${BOLD}╚════════════════════════════════════════════════════════════╝${NC}"
    echo ""
    read -p "Type 'RESET' to confirm: " confirm

    if [ "$confirm" != "RESET" ]; then
        log_warn "Aborted"
        return 1
    fi

    echo ""
    log_step "Step 1/6: Stopping 2.0 services..."
    for service in "${MINING_SERVICES[@]}"; do
        service_stop "$service"
    done

    log_step "Step 2/6: Dropping 2.0 databases..."
    db_drop

    log_step "Step 3/6: Creating 2.0 databases..."
    db_create

    log_step "Step 4/6: Running migrations..."
    db_migrate

    log_step "Step 5/6: Resetting CDC consumer offsets..."
    kafka-consumer-groups.sh --bootstrap-server "$KAFKA_BROKERS" \
        --group "$CDC_CONSUMER_GROUP" \
        --reset-offsets \
        --to-earliest \
        --all-topics \
        --execute 2>/dev/null || {
        docker exec "$KAFKA_CONTAINER" kafka-consumer-groups.sh --bootstrap-server localhost:9092 \
            --group "$CDC_CONSUMER_GROUP" \
            --reset-offsets \
            --to-earliest \
            --all-topics \
            --execute 2>/dev/null || log_warn "Could not reset offsets, may need manual reset"
    }

    log_step "Step 6/6: Starting 2.0 services..."
    for service in "${MINING_SERVICES[@]}"; do
        service_start "$service"
    done

    echo ""
    echo -e "${GREEN}${BOLD}╔════════════════════════════════════════════════════════════╗${NC}"
    echo -e "${GREEN}${BOLD}║  Full reset completed successfully!                        ║${NC}"
    echo -e "${GREEN}${BOLD}║                                                            ║${NC}"
    echo -e "${GREEN}${BOLD}║  The 2.0 system will now sync all data from 1.0 via CDC.  ║${NC}"
    echo -e "${GREEN}${BOLD}║  Monitor with: ./deploy-mining.sh logs contribution-service║${NC}"
    echo -e "${GREEN}${BOLD}╚════════════════════════════════════════════════════════════╝${NC}"
}

# ===========================================================================
# Health Check Function
# ===========================================================================
health_check() {
    print_section "2.0 System Health Check"

    local all_healthy=true

    # Check databases
    echo -e "${BOLD}Databases:${NC}"
    for db in "${MINING_DATABASES[@]}"; do
        if run_psql "$db" "SELECT 1" &>/dev/null; then
            echo -e "  ${GREEN}✓${NC} $db"
        else
            echo -e "  ${RED}✗${NC} $db"
            all_healthy=false
        fi
    done

    echo ""
    echo -e "${BOLD}Services:${NC}"
    for service in "${MINING_SERVICES[@]}"; do
        port="${SERVICE_PORTS[$service]}"
        health_response=$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:$port/health" 2>/dev/null || echo "000")
        if [ "$health_response" = "200" ]; then
            echo -e "  ${GREEN}✓${NC} $service (port $port)"
        else
            echo -e "  ${RED}✗${NC} $service (port $port)"
            all_healthy=false
        fi
    done

    echo ""
    echo -e "${BOLD}Infrastructure:${NC}"
    # Kafka
    if nc -z "${KAFKA_BROKERS%%:*}" "${KAFKA_BROKERS##*:}" 2>/dev/null; then
        echo -e "  ${GREEN}✓${NC} Kafka ($KAFKA_BROKERS)"
    else
        echo -e "  ${RED}✗${NC} Kafka ($KAFKA_BROKERS)"
        all_healthy=false
    fi

    # Redis
    if nc -z "$REDIS_HOST" "$REDIS_PORT" 2>/dev/null; then
        echo -e "  ${GREEN}✓${NC} Redis ($REDIS_HOST:$REDIS_PORT)"
    else
        echo -e "  ${RED}✗${NC} Redis ($REDIS_HOST:$REDIS_PORT)"
        all_healthy=false
    fi

    echo ""
    if [ "$all_healthy" = true ]; then
        echo -e "${GREEN}${BOLD}All systems healthy!${NC}"
    else
        echo -e "${RED}${BOLD}Some systems are unhealthy!${NC}"
        return 1
    fi
}
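
# The same checks can be run by hand (illustrative; ports come from
# SERVICE_PORTS and every 2.0 service is expected to expose GET /health):
#   curl -s -o /dev/null -w "%{http_code}\n" http://localhost:3020/health
#   nc -z localhost 9092 && echo "kafka reachable"
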
# ===========================================================================
# Statistics Function
# ===========================================================================
show_stats() {
    print_section "2.0 System Statistics"

    local stats_sql="
        SELECT tablename AS table,
               pg_size_pretty(pg_total_relation_size(schemaname || '.' || tablename)) AS size
        FROM pg_tables
        WHERE schemaname = 'public'
        ORDER BY pg_total_relation_size(schemaname || '.' || tablename) DESC
        LIMIT 10;
    "

    for db in "${MINING_DATABASES[@]}"; do
        echo -e "${BOLD}Database: $db${NC}"

        if run_psql "$db" "SELECT 1" &>/dev/null; then
            # Get table stats using Docker or local psql
            if docker ps --format '{{.Names}}' 2>/dev/null | grep -q "^${POSTGRES_CONTAINER}$"; then
                docker exec -e PGPASSWORD="$POSTGRES_PASSWORD" "$POSTGRES_CONTAINER" \
                    psql -h localhost -U "$POSTGRES_USER" -d "$db" -t -c "$stats_sql" 2>/dev/null || echo "  Could not get table stats"
            else
                PGPASSWORD="$POSTGRES_PASSWORD" psql -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -U "$POSTGRES_USER" -d "$db" -t -c "$stats_sql" 2>/dev/null || echo "  Could not get table stats"
            fi
        else
            echo "  Database not available"
        fi
        echo ""
    done
}

# ===========================================================================
# Help Function
# ===========================================================================
show_help() {
    print_header

    echo "Usage: $0 <command> [service] [options]"
    echo ""
    echo -e "${BOLD}Service Management:${NC}"
    echo "  up [service]                    Start all or a specific service"
    echo "  down [service]                  Stop all or a specific service"
    echo "  restart [service]               Restart all or a specific service"
    echo "  status                          Show all service status"
    echo "  logs <service> [lines]          View logs for a specific service"
    echo "  build [service] [--no-cache]    Build all or a specific service"
    echo "  rebuild [service]               Rebuild with --no-cache"
    echo ""
    echo -e "${BOLD}Database Management:${NC}"
    echo "  db-create [service]             Create databases (all or for a specific service)"
    echo "  db-migrate [service]            Run Prisma migrations"
    echo -e "  db-reset [service]              Drop and recreate databases ${RED}(DANGEROUS!)${NC}"
    echo "  db-status                       Show database status"
    echo ""
    echo -e "${BOLD}CDC / Sync Management:${NC}"
    echo "  sync-reset                      Reset CDC consumer to read from the beginning"
    echo "  sync-status                     Show CDC consumer group status"
    echo ""
    echo -e "${BOLD}Full Reset:${NC}"
    echo -e "  full-reset                      Complete system reset ${RED}(DANGEROUS!)${NC}"
    echo "                                  Drops DBs, resets CDC, restarts services"
    echo ""
    echo -e "${BOLD}Health & Monitoring:${NC}"
    echo "  health                          Check health of all 2.0 components"
    echo "  stats                           Show system statistics"
    echo ""
    echo -e "${BOLD}2.0 Services:${NC}"
    for service in "${MINING_SERVICES[@]}"; do
        echo "  - $service (port ${SERVICE_PORTS[$service]})"
    done
    echo ""
    echo -e "${BOLD}Service Aliases:${NC}"
    echo "  contrib, contribution -> contribution-service"
    echo "  mining                -> mining-service"
    echo "  trading               -> trading-service"
    echo "  admin                 -> mining-admin-service"
    echo "  auth                  -> auth-service"
    echo "  wallet                -> mining-wallet-service"
    echo ""
    echo -e "${BOLD}Examples:${NC}"
    echo "  $0 up                           # Start all services"
    echo "  $0 up mining                    # Start only mining-service"
    echo "  $0 restart contrib              # Restart contribution-service"
    echo "  $0 build trading --no-cache     # Rebuild trading-service"
    echo "  $0 logs admin 200               # Show last 200 lines of admin logs"
    echo "  $0 db-reset mining              # Reset only mining-service database"
    echo ""
    echo -e "${YELLOW}Note: The 2.0 system is completely isolated from 1.0.${NC}"
    echo -e "${YELLOW}Any reset operation will NOT affect the 1.0 system.${NC}"
}

# ===========================================================================
# Main
# ===========================================================================
main() {
    load_env

    case "${1:-}" in
        # Service commands
        up)
            services_up "$2"
            ;;
        down)
            services_down "$2"
            ;;
        restart)
            services_restart "$2"
            ;;
        status)
            print_header
            services_status
            ;;
        logs)
            services_logs "$2" "$3"
            ;;
        build)
            services_build "$2" "$3"
            ;;
        rebuild)
            services_rebuild "$2" "$3"
            ;;

        # Database commands
        db-create)
            if [ -n "$2" ]; then
                resolved=$(resolve_service_name "$2") && db_create "$resolved"
            else
                db_create
            fi
            ;;
        db-migrate)
            if [ -n "$2" ]; then
                resolved=$(resolve_service_name "$2") && db_migrate "$resolved"
            else
                db_migrate
            fi
            ;;
        db-reset)
            if [ -n "$2" ]; then
                resolved=$(resolve_service_name "$2") && db_reset "$resolved"
            else
                db_reset
            fi
            ;;
        db-status)
            print_header
            db_status
            ;;
        # Sync commands
        sync-reset)
            sync_reset
            ;;
        sync-status)
            sync_status
            ;;

        # Full reset
        full-reset)
            print_header
            full_reset
            ;;

        # Health & monitoring
        health)
            print_header
            health_check
            ;;
        stats)
            print_header
            show_stats
            ;;

        # Help
        help|--help|-h|"")
            show_help
            ;;

        *)
            log_error "Unknown command: $1"
            echo ""
            show_help
            exit 1
            ;;
    esac
}

main "$@"