diff --git a/backend/services/deploy-mining.sh b/backend/services/deploy-mining.sh
index 73a11122..923e79d0 100755
--- a/backend/services/deploy-mining.sh
+++ b/backend/services/deploy-mining.sh
@@ -881,16 +881,16 @@ full_reset() {
     fi
     echo ""
 
-    log_step "Step 1/14: Stopping 2.0 services..."
+    log_step "Step 1/16: Stopping 2.0 services..."
     for service in "${MINING_SERVICES[@]}"; do
         service_stop "$service"
     done
 
-    log_step "Step 2/14: Waiting for Kafka consumers to become inactive..."
+    log_step "Step 2/16: Waiting for Kafka consumers to become inactive..."
     log_info "Waiting 15 seconds for consumer group session timeout..."
     sleep 15
 
-    log_step "Step 3/14: Resetting CDC consumer offsets..."
+    log_step "Step 3/16: Resetting CDC consumer offsets..."
     # Reset offsets BEFORE migrations (which may start containers)
     for group in "${CDC_CONSUMER_GROUPS[@]}"; do
         log_info "Resetting consumer group: $group"
@@ -927,17 +927,17 @@ full_reset() {
         fi
     done
 
-    log_step "Step 4/14: Dropping 2.0 databases..."
+    log_step "Step 4/16: Dropping 2.0 databases..."
     db_drop
 
-    log_step "Step 5/14: Creating 2.0 databases..."
+    log_step "Step 5/16: Creating 2.0 databases..."
     db_create
 
-    log_step "Step 6/14: Running migrations..."
+    log_step "Step 6/16: Running migrations..."
     db_migrate
 
     # Stop any containers that were started during migration
-    log_step "Step 7/14: Stopping containers and resetting CDC offsets again..."
+    log_step "Step 7/16: Stopping containers and resetting CDC offsets again..."
     log_info "Migration may have started CDC consumers, stopping them now..."
     for service in "${MINING_SERVICES[@]}"; do
         docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" stop "$service" 2>/dev/null || true
@@ -981,22 +981,22 @@ full_reset() {
         fi
     done
 
-    log_step "Step 8/14: Registering Debezium outbox connectors..."
+    log_step "Step 8/16: Registering Debezium outbox connectors..."
     # Register outbox connectors for 2.0 service events
     # These connectors capture events from each service's outbox table and send to Kafka
     # mining-admin-service consumes these events to aggregate data from all 2.0 services
     register_outbox_connectors || log_warn "Some connectors may not be registered"
 
-    log_step "Step 9/14: Starting 2.0 services..."
+    log_step "Step 9/16: Starting 2.0 services..."
     for service in "${MINING_SERVICES[@]}"; do
         service_start "$service"
     done
 
-    log_step "Step 10/14: Waiting for services to be ready..."
+    log_step "Step 10/16: Waiting for services to be ready..."
     log_info "Waiting 20 seconds for all services to start and sync from 1.0 CDC..."
     sleep 20
 
-    log_step "Step 11/14: Publishing legacy users to mining-admin-service..."
+    log_step "Step 11/16: Publishing legacy users to mining-admin-service..."
     # Call the auth-service API to publish all legacy user events to the outbox
     # so that mining-admin-service can receive the user data via Debezium
     local publish_url="http://localhost:3024/api/v2/admin/legacy-users/publish-all"
@@ -1012,7 +1012,7 @@ full_reset() {
         log_info "You may need to manually call: curl -X POST $publish_url"
     fi
 
-    log_step "Step 12/14: Publishing contribution data to mining-admin-service..."
+    log_step "Step 12/16: Publishing contribution data to mining-admin-service..."
     # Call the contribution-service API to publish all contribution account events to the outbox
     local contrib_publish_url="http://localhost:3020/api/v2/admin/contribution-accounts/publish-all"
     local contrib_result
@@ -1027,7 +1027,7 @@ full_reset() {
         log_info "You may need to manually call: curl -X POST $contrib_publish_url"
     fi
 
-    log_step "Step 13/14: Publishing referral relationships to mining-admin-service..."
+ log_step "Step 13/16: Publishing referral relationships to mining-admin-service..." # 调用 contribution-service API 发布所有推荐关系事件到 outbox local referral_publish_url="http://localhost:3020/api/v2/admin/referrals/publish-all" local referral_result @@ -1042,7 +1042,7 @@ full_reset() { log_info "You may need to manually call: curl -X POST $referral_publish_url" fi - log_step "Step 14/17: Publishing adoption records to mining-admin-service..." + log_step "Step 14/16: Publishing adoption records to mining-admin-service..." # 调用 contribution-service API 发布所有认种记录事件到 outbox local adoption_publish_url="http://localhost:3020/api/v2/admin/adoptions/publish-all" local adoption_result @@ -1057,22 +1057,13 @@ full_reset() { log_info "You may need to manually call: curl -X POST $adoption_publish_url" fi - log_step "Step 15/17: Publishing contribution records to mining-admin-service..." - # 调用 contribution-service API 发布所有算力记录事件到 outbox - local records_publish_url="http://localhost:3020/api/v2/admin/contribution-records/publish-all" - local records_result - records_result=$(curl -s -X POST "$records_publish_url" 2>/dev/null || echo '{"error": "curl failed"}') + # NOTE: contribution-records/publish-all is NOT called here because: + # - Contribution records are already published to outbox when calculated by contribution-service + # - Debezium automatically captures outbox_events and sends to Kafka + # - Calling publish-all again would cause duplicate records in mining-admin-service + # - See: contribution-calculation.service.ts -> publishContributionRecordEvents() - if echo "$records_result" | grep -q '"success":true'; then - local records_count - records_count=$(echo "$records_result" | grep -o '"publishedCount":[0-9]*' | grep -o '[0-9]*') - log_success "Published $records_count contribution record events to outbox" - else - log_warn "Failed to publish contribution records: $records_result" - log_info "You may need to manually call: curl -X POST $records_publish_url" - fi - - log_step "Step 16/17: Publishing network progress to mining-admin-service..." + log_step "Step 15/16: Publishing network progress to mining-admin-service..." # 调用 contribution-service API 发布全网进度事件到 outbox local progress_publish_url="http://localhost:3020/api/v2/admin/network-progress/publish" local progress_result @@ -1085,7 +1076,7 @@ full_reset() { log_info "You may need to manually call: curl -X POST $progress_publish_url" fi - log_step "Step 17/17: Waiting for mining-admin-service to sync all data..." + log_step "Step 16/16: Waiting for mining-admin-service to sync all data..." # 等待 mining-admin-service 消费 outbox 事件 log_info "Waiting 15 seconds for mining-admin-service to sync all data..." sleep 15