feat(mpc-system): add native deployment script to replace Docker-based deployment

Docker cannot be used reliably in China's network environment, so add a native deployment option:

New file:
- backend/mpc-system/scripts/deploy.sh - one-command deployment script

Script commands:
- install: install Go 1.21, PostgreSQL, Redis, and RabbitMQ
- build: compile the Go services and configure the infrastructure
- start/stop/restart: service control
- status: show service status
- logs: view service logs
- uninstall: remove the services

Documentation updates:
- Rewrite section 4 of DEPLOYMENT_GUIDE.md for the native deployment approach
- Add systemd service management notes
- Add Nginx/iptables port-mapping configuration
- Update the troubleshooting guide

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
Developer, 2025-12-01 23:24:16 -08:00
commit 70e6391691 (parent 42735e41ef)
2 changed files with 841 additions and 74 deletions
backend/mpc-system/scripts/deploy.sh

@@ -0,0 +1,595 @@
#!/bin/bash
#
# MPC-System Native Deployment Script (No Docker)
# For environments where Docker is not available (e.g., China)
#
# Usage:
# ./scripts/deploy.sh install # Install dependencies and build services
# ./scripts/deploy.sh start # Start all services
# ./scripts/deploy.sh stop # Stop all services
# ./scripts/deploy.sh restart # Restart all services
# ./scripts/deploy.sh status # Check service status
# ./scripts/deploy.sh logs # View logs
# ./scripts/deploy.sh uninstall # Remove all services
#
set -e
# ============================================
# Configuration
# ============================================
MPC_HOME="${MPC_HOME:-/opt/mpc-system}"
MPC_USER="${MPC_USER:-mpc}"
MPC_GROUP="${MPC_GROUP:-mpc}"
LOG_DIR="${MPC_HOME}/logs"
PID_DIR="${MPC_HOME}/pids"
BIN_DIR="${MPC_HOME}/bin"
CONFIG_DIR="${MPC_HOME}/config"
DATA_DIR="${MPC_HOME}/data"
# Service names
SERVICES=("account-service" "session-coordinator" "message-router" "server-party-1" "server-party-2" "server-party-3")
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# ============================================
# Helper Functions
# ============================================
log_info() {
echo -e "${GREEN}[INFO]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
check_root() {
if [ "$EUID" -ne 0 ]; then
log_error "This script must be run as root"
exit 1
fi
}
# ============================================
# Install Dependencies
# ============================================
install_dependencies() {
log_info "Installing system dependencies..."
# Update package list
apt-get update
# Install basic tools
apt-get install -y curl wget git build-essential
# Install Go 1.21
log_info "Installing Go 1.21..."
if ! command -v go &> /dev/null || [[ $(go version) != *"go1.21"* ]]; then
wget -q https://go.dev/dl/go1.21.13.linux-amd64.tar.gz -O /tmp/go.tar.gz
rm -rf /usr/local/go
tar -C /usr/local -xzf /tmp/go.tar.gz
rm /tmp/go.tar.gz
# Add Go to PATH for all users
echo 'export PATH=$PATH:/usr/local/go/bin' > /etc/profile.d/go.sh
source /etc/profile.d/go.sh
fi
log_info "Go version: $(go version)"
# Install PostgreSQL 15
log_info "Installing PostgreSQL 15..."
if ! command -v psql &> /dev/null; then
apt-get install -y postgresql postgresql-contrib
systemctl enable postgresql
systemctl start postgresql
fi
# Install Redis
log_info "Installing Redis..."
if ! command -v redis-server &> /dev/null; then
apt-get install -y redis-server
systemctl enable redis-server
systemctl start redis-server
fi
# Install RabbitMQ
log_info "Installing RabbitMQ..."
if ! command -v rabbitmqctl &> /dev/null; then
# Install Erlang first
apt-get install -y erlang-base erlang-nox erlang-dev erlang-src
# Install RabbitMQ
apt-get install -y rabbitmq-server
systemctl enable rabbitmq-server
systemctl start rabbitmq-server
# Enable management plugin
rabbitmq-plugins enable rabbitmq_management
fi
log_info "All dependencies installed successfully"
}
# ============================================
# Create User and Directories
# ============================================
setup_directories() {
log_info "Setting up directories..."
# Create mpc user if not exists
if ! id "$MPC_USER" &>/dev/null; then
useradd -r -s /bin/false -d "$MPC_HOME" "$MPC_USER"
fi
# Create directories
mkdir -p "$MPC_HOME" "$LOG_DIR" "$PID_DIR" "$BIN_DIR" "$CONFIG_DIR" "$DATA_DIR"
# Set permissions
chown -R "$MPC_USER:$MPC_GROUP" "$MPC_HOME"
chmod 755 "$MPC_HOME"
log_info "Directories created at $MPC_HOME"
}
# ============================================
# Configure Infrastructure
# ============================================
configure_postgres() {
log_info "Configuring PostgreSQL..."
# Load environment variables
source "$CONFIG_DIR/mpc.env" 2>/dev/null || true
local DB_USER="${POSTGRES_USER:-mpc_user}"
local DB_PASS="${POSTGRES_PASSWORD:-mpc_secret_password}"
local DB_NAME="mpc_system"
# Create database and user
sudo -u postgres psql -c "CREATE USER $DB_USER WITH PASSWORD '$DB_PASS';" 2>/dev/null || true
sudo -u postgres psql -c "CREATE DATABASE $DB_NAME OWNER $DB_USER;" 2>/dev/null || true
sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE $DB_NAME TO $DB_USER;" 2>/dev/null || true
# Run migrations
log_info "Running database migrations..."
PGPASSWORD="$DB_PASS" psql -h localhost -U "$DB_USER" -d "$DB_NAME" -f "$MPC_HOME/migrations/001_init_schema.up.sql" 2>/dev/null || log_warn "Migration may have already been applied"
log_info "PostgreSQL configured"
}
configure_redis() {
log_info "Configuring Redis..."
source "$CONFIG_DIR/mpc.env" 2>/dev/null || true
local REDIS_PASS="${REDIS_PASSWORD:-}"
if [ -n "$REDIS_PASS" ]; then
# Set Redis password
sed -i "s/^# requirepass.*/requirepass $REDIS_PASS/" /etc/redis/redis.conf
systemctl restart redis-server
fi
log_info "Redis configured"
}
configure_rabbitmq() {
log_info "Configuring RabbitMQ..."
source "$CONFIG_DIR/mpc.env" 2>/dev/null || true
local RABBIT_USER="${RABBITMQ_USER:-mpc_user}"
local RABBIT_PASS="${RABBITMQ_PASSWORD:-mpc_rabbit_password}"
# Create user
rabbitmqctl add_user "$RABBIT_USER" "$RABBIT_PASS" 2>/dev/null || rabbitmqctl change_password "$RABBIT_USER" "$RABBIT_PASS"
rabbitmqctl set_permissions -p / "$RABBIT_USER" ".*" ".*" ".*"
rabbitmqctl set_user_tags "$RABBIT_USER" administrator
log_info "RabbitMQ configured"
}
# ============================================
# Build Services
# ============================================
build_services() {
log_info "Building MPC services..."
# Get the script's directory (where the source code is)
local SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
export PATH=$PATH:/usr/local/go/bin
export GOPATH="$MPC_HOME/go"
export GOPROXY="https://goproxy.cn,direct" # Use Chinese proxy
cd "$SOURCE_DIR"
# Download dependencies
log_info "Downloading Go dependencies..."
go mod download
# Build account service
log_info "Building account-service..."
go build -o "$BIN_DIR/account-service" ./services/account/cmd/server/
# Build session coordinator
log_info "Building session-coordinator..."
go build -o "$BIN_DIR/session-coordinator" ./services/session-coordinator/cmd/server/
# Build message router
log_info "Building message-router..."
go build -o "$BIN_DIR/message-router" ./services/message-router/cmd/server/
# Build server party (single binary, different config for each party)
log_info "Building server-party..."
go build -o "$BIN_DIR/server-party" ./services/server-party/cmd/server/
# Copy migrations
cp -r "$SOURCE_DIR/migrations" "$MPC_HOME/"
# Set permissions
chmod +x "$BIN_DIR"/*
chown -R "$MPC_USER:$MPC_GROUP" "$BIN_DIR"
log_info "All services built successfully"
}
# ============================================
# Create Systemd Service Files
# ============================================
create_systemd_services() {
log_info "Creating systemd service files..."
# Common service template
create_service_file() {
local SERVICE_NAME=$1
local DESCRIPTION=$2
local EXEC_START=$3
local PARTY_ID=$4
cat > "/etc/systemd/system/$SERVICE_NAME.service" << EOF
[Unit]
Description=MPC System - $DESCRIPTION
After=network.target postgresql.service redis-server.service rabbitmq-server.service
Wants=postgresql.service redis-server.service rabbitmq-server.service
[Service]
Type=simple
User=$MPC_USER
Group=$MPC_GROUP
WorkingDirectory=$MPC_HOME
EnvironmentFile=$CONFIG_DIR/mpc.env
${PARTY_ID:+Environment=PARTY_ID=$PARTY_ID}
ExecStart=$EXEC_START
Restart=always
RestartSec=5
StandardOutput=append:$LOG_DIR/$SERVICE_NAME.log
StandardError=append:$LOG_DIR/$SERVICE_NAME.error.log
# Security settings
NoNewPrivileges=yes
ProtectSystem=strict
ProtectHome=yes
ReadWritePaths=$MPC_HOME
[Install]
WantedBy=multi-user.target
EOF
}
# Create service files
create_service_file "mpc-account" "Account Service" "$BIN_DIR/account-service"
create_service_file "mpc-session-coordinator" "Session Coordinator" "$BIN_DIR/session-coordinator"
create_service_file "mpc-message-router" "Message Router" "$BIN_DIR/message-router"
create_service_file "mpc-server-party-1" "Server Party 1" "$BIN_DIR/server-party" "server-party-1"
create_service_file "mpc-server-party-2" "Server Party 2" "$BIN_DIR/server-party" "server-party-2"
create_service_file "mpc-server-party-3" "Server Party 3" "$BIN_DIR/server-party" "server-party-3"
# Reload systemd
systemctl daemon-reload
log_info "Systemd services created"
}
# ============================================
# Create Environment Configuration
# ============================================
create_env_config() {
log_info "Creating environment configuration..."
if [ ! -f "$CONFIG_DIR/mpc.env" ]; then
cat > "$CONFIG_DIR/mpc.env" << 'EOF'
# MPC-System Environment Configuration
# Modify these values for your production environment
# Environment
ENVIRONMENT=production
# PostgreSQL Database
POSTGRES_USER=mpc_user
POSTGRES_PASSWORD=your_secure_postgres_password_here
MPC_DATABASE_HOST=localhost
MPC_DATABASE_PORT=5432
MPC_DATABASE_USER=mpc_user
MPC_DATABASE_PASSWORD=your_secure_postgres_password_here
MPC_DATABASE_DBNAME=mpc_system
MPC_DATABASE_SSLMODE=disable
# Redis Cache
REDIS_PASSWORD=
MPC_REDIS_HOST=localhost
MPC_REDIS_PORT=6379
MPC_REDIS_PASSWORD=
# RabbitMQ Message Queue
RABBITMQ_USER=mpc_user
RABBITMQ_PASSWORD=your_secure_rabbitmq_password_here
MPC_RABBITMQ_HOST=localhost
MPC_RABBITMQ_PORT=5672
MPC_RABBITMQ_USER=mpc_user
MPC_RABBITMQ_PASSWORD=your_secure_rabbitmq_password_here
# JWT Configuration
JWT_SECRET_KEY=your_super_secure_jwt_secret_key_at_least_32_characters
MPC_JWT_SECRET_KEY=your_super_secure_jwt_secret_key_at_least_32_characters
MPC_JWT_ISSUER=mpc-system
# Crypto Master Key (64 hex characters = 256-bit key)
CRYPTO_MASTER_KEY=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
MPC_CRYPTO_MASTER_KEY=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
# API Security
MPC_API_KEY=your_very_secure_api_key_at_least_32_characters
ALLOWED_IPS=192.168.1.111
# Server Configuration
MPC_SERVER_ENVIRONMENT=production
MPC_SERVER_HTTP_PORT=8080
MPC_SERVER_GRPC_PORT=50051
# Internal Service Addresses
SESSION_COORDINATOR_ADDR=localhost:50051
MESSAGE_ROUTER_ADDR=localhost:50052
EOF
chmod 600 "$CONFIG_DIR/mpc.env"
chown "$MPC_USER:$MPC_GROUP" "$CONFIG_DIR/mpc.env"
log_warn "Environment file created at $CONFIG_DIR/mpc.env"
log_warn "Please edit this file with your production values before starting services!"
else
log_info "Environment file already exists"
fi
}
# ============================================
# Service Control Functions
# ============================================
start_services() {
log_info "Starting MPC services..."
# Start infrastructure first
systemctl start postgresql
systemctl start redis-server
systemctl start rabbitmq-server
sleep 3
# Start MPC services in order
systemctl start mpc-session-coordinator
sleep 2
systemctl start mpc-message-router
sleep 2
systemctl start mpc-server-party-1
systemctl start mpc-server-party-2
systemctl start mpc-server-party-3
sleep 2
systemctl start mpc-account
log_info "All services started"
}
stop_services() {
log_info "Stopping MPC services..."
systemctl stop mpc-account 2>/dev/null || true
systemctl stop mpc-server-party-1 2>/dev/null || true
systemctl stop mpc-server-party-2 2>/dev/null || true
systemctl stop mpc-server-party-3 2>/dev/null || true
systemctl stop mpc-message-router 2>/dev/null || true
systemctl stop mpc-session-coordinator 2>/dev/null || true
log_info "All MPC services stopped"
}
restart_services() {
stop_services
sleep 2
start_services
}
enable_services() {
log_info "Enabling MPC services to start on boot..."
systemctl enable mpc-session-coordinator
systemctl enable mpc-message-router
systemctl enable mpc-server-party-1
systemctl enable mpc-server-party-2
systemctl enable mpc-server-party-3
systemctl enable mpc-account
log_info "Services enabled"
}
status_services() {
echo ""
echo "============================================"
echo "MPC System Service Status"
echo "============================================"
echo ""
# Infrastructure
echo "Infrastructure:"
echo " PostgreSQL: $(systemctl is-active postgresql)"
echo " Redis: $(systemctl is-active redis-server)"
echo " RabbitMQ: $(systemctl is-active rabbitmq-server)"
echo ""
# MPC Services
echo "MPC Services:"
echo " Session Coordinator: $(systemctl is-active mpc-session-coordinator)"
echo " Message Router: $(systemctl is-active mpc-message-router)"
echo " Server Party 1: $(systemctl is-active mpc-server-party-1)"
echo " Server Party 2: $(systemctl is-active mpc-server-party-2)"
echo " Server Party 3: $(systemctl is-active mpc-server-party-3)"
echo " Account Service: $(systemctl is-active mpc-account)"
echo ""
# Health check
echo "Health Check:"
if curl -s http://localhost:8080/health > /dev/null 2>&1; then
# echo -e so the color escape codes are interpreted instead of printed literally
echo -e "  Account Service API: ${GREEN}OK${NC}"
else
echo -e "  Account Service API: ${RED}FAIL${NC}"
fi
echo ""
}
view_logs() {
# Called as view_logs "$@": $1 is the "logs" subcommand, $2 is the optional service name
local SERVICE="${2:-mpc-account}"
echo "Viewing logs for $SERVICE..."
echo "Press Ctrl+C to exit"
echo ""
if [ -f "$LOG_DIR/$SERVICE.log" ]; then
tail -f "$LOG_DIR/$SERVICE.log"
else
journalctl -u "$SERVICE" -f
fi
}
# ============================================
# Install Command
# ============================================
install() {
check_root
log_info "Starting MPC-System installation..."
install_dependencies
setup_directories
create_env_config
log_warn "Please edit the configuration file: $CONFIG_DIR/mpc.env"
log_warn "Then run: $0 build"
}
build() {
check_root
log_info "Building MPC-System..."
build_services
create_systemd_services
configure_postgres
configure_redis
configure_rabbitmq
enable_services
log_info "Build complete!"
log_info "Start services with: $0 start"
}
# ============================================
# Uninstall Command
# ============================================
uninstall() {
check_root
log_warn "This will remove all MPC services and data!"
read -p "Are you sure? (yes/no): " confirm
if [ "$confirm" != "yes" ]; then
log_info "Uninstall cancelled"
exit 0
fi
stop_services
# Disable and remove services
for service in mpc-account mpc-session-coordinator mpc-message-router mpc-server-party-1 mpc-server-party-2 mpc-server-party-3; do
systemctl disable "$service" 2>/dev/null || true
rm -f "/etc/systemd/system/$service.service"
done
systemctl daemon-reload
# Remove directories (keep data by default)
rm -rf "$BIN_DIR" "$PID_DIR"
log_info "MPC services removed"
log_warn "Data directory preserved at: $DATA_DIR"
log_warn "Config directory preserved at: $CONFIG_DIR"
log_warn "To completely remove, run: rm -rf $MPC_HOME"
}
# ============================================
# Main
# ============================================
case "${1:-}" in
install)
install
;;
build)
build
;;
start)
start_services
;;
stop)
stop_services
;;
restart)
restart_services
;;
status)
status_services
;;
logs)
view_logs "$@"
;;
uninstall)
uninstall
;;
*)
echo "MPC-System Deployment Script"
echo ""
echo "Usage: $0 {install|build|start|stop|restart|status|logs|uninstall}"
echo ""
echo "Commands:"
echo " install - Install system dependencies (Go, PostgreSQL, Redis, RabbitMQ)"
echo " build - Build services and configure infrastructure"
echo " start - Start all MPC services"
echo " stop - Stop all MPC services"
echo " restart - Restart all MPC services"
echo " status - Show service status"
echo " logs - View service logs (use: $0 logs [service-name])"
echo " uninstall - Remove MPC services"
echo ""
echo "Example:"
echo " $0 install # First time setup"
echo " vim $CONFIG_DIR/mpc.env # Edit configuration"
echo " $0 build # Build and configure"
echo " $0 start # Start services"
echo " $0 status # Check status"
echo ""
exit 1
;;
esac

DEPLOYMENT_GUIDE.md

@@ -471,123 +471,226 @@ server {
## 4. MPC-System Deployment (192.168.1.100)

MPC-System is a set of Go microservices responsible for TSS (Threshold Signature Scheme) key generation and signing operations.
It uses a 2-of-3 threshold signature scheme and consists of several internal services.

> **Note**: Because Docker cannot be used reliably in China's network environment, this deployment uses a native installation.

### 4.1 System Requirements

- Ubuntu 20.04+ / Debian 11+
- Go 1.21+ (installed automatically by the script)
- PostgreSQL 15+ (installed automatically by the script)
- Redis 7+ (installed automatically by the script)
- RabbitMQ 3+ (installed automatically by the script)
- Minimum 4 GB RAM
- 20 GB disk space
### 4.2 Service Architecture

```
MPC-System (native deployment)
├── mpc-account                 # External API entry point (port 8080 → 4000)
├── mpc-session-coordinator     # Session coordinator (gRPC :50051)
├── mpc-message-router          # Message router (gRPC :50052)
├── mpc-server-party-1          # TSS party 1
├── mpc-server-party-2          # TSS party 2
├── mpc-server-party-3          # TSS party 3
└── Infrastructure (PostgreSQL, Redis, RabbitMQ)
```
### 4.3 One-Command Deployment Script

MPC-System ships with a deployment script, `scripts/deploy.sh`, which supports the following commands:

| Command | Description |
|---------|-------------|
| `install` | Install system dependencies (Go, PostgreSQL, Redis, RabbitMQ) |
| `build` | Compile the services and configure the infrastructure |
| `start` | Start all MPC services |
| `stop` | Stop all MPC services |
| `restart` | Restart all MPC services |
| `status` | Show service status |
| `logs` | View service logs |
| `uninstall` | Remove the MPC services |

### 4.4 Deployment Steps
```bash
# ============================================
# Step 1: Clone the code
# ============================================
cd /home/ceshi
git clone https://github.com/your-org/rwadurian.git
cd rwadurian/backend/mpc-system

# ============================================
# Step 2: Install dependencies
# ============================================
# Requires root privileges
sudo bash scripts/deploy.sh install

# The script automatically installs:
# - Go 1.21 (downloaded from go.dev; goproxy.cn is used to speed up module downloads)
# - PostgreSQL 15
# - Redis 7
# - RabbitMQ 3

# ============================================
# Step 3: Configure environment variables
# ============================================
sudo vim /opt/mpc-system/config/mpc.env
```

**Contents of the mpc.env configuration file:**
```bash
# MPC-System environment configuration
# Change the values below to secure production values

# Environment
ENVIRONMENT=production

# PostgreSQL database
POSTGRES_USER=mpc_user
POSTGRES_PASSWORD=your_secure_postgres_password_here
MPC_DATABASE_HOST=localhost
MPC_DATABASE_PORT=5432
MPC_DATABASE_USER=mpc_user
MPC_DATABASE_PASSWORD=your_secure_postgres_password_here
MPC_DATABASE_DBNAME=mpc_system
MPC_DATABASE_SSLMODE=disable

# Redis cache (leave empty if no password is required)
REDIS_PASSWORD=
MPC_REDIS_HOST=localhost
MPC_REDIS_PORT=6379
MPC_REDIS_PASSWORD=

# RabbitMQ message queue
RABBITMQ_USER=mpc_user
RABBITMQ_PASSWORD=your_secure_rabbitmq_password_here
MPC_RABBITMQ_HOST=localhost
MPC_RABBITMQ_PORT=5672
MPC_RABBITMQ_USER=mpc_user
MPC_RABBITMQ_PASSWORD=your_secure_rabbitmq_password_here

# JWT configuration (at least 32 characters)
JWT_SECRET_KEY=your_super_secure_jwt_secret_key_at_least_32_characters
MPC_JWT_SECRET_KEY=your_super_secure_jwt_secret_key_at_least_32_characters
MPC_JWT_ISSUER=mpc-system

# Master encryption key (64 hex characters = 256-bit key, used to encrypt key shares)
CRYPTO_MASTER_KEY=your_64_hex_characters_master_key_here
MPC_CRYPTO_MASTER_KEY=your_64_hex_characters_master_key_here

# API authentication key (must match the MPC_API_KEY configured in mpc-service)
MPC_API_KEY=your_very_secure_api_key_at_least_32_characters

# IP addresses allowed to connect (the backend server)
ALLOWED_IPS=192.168.1.111

# Server configuration
MPC_SERVER_ENVIRONMENT=production
MPC_SERVER_HTTP_PORT=8080
MPC_SERVER_GRPC_PORT=50051

# Internal service addresses
SESSION_COORDINATOR_ADDR=localhost:50051
MESSAGE_ROUTER_ADDR=localhost:50052
```
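The placeholder secrets above must be replaced before starting the services. As a suggestion (not part of the original guide), random values of the required length can be generated with openssl:

```bash
# Suggested way to generate the secrets referenced in mpc.env (assumes openssl is installed)
openssl rand -hex 32      # 64 hex characters -> CRYPTO_MASTER_KEY / MPC_CRYPTO_MASTER_KEY
openssl rand -base64 48   # long random string -> JWT_SECRET_KEY / MPC_API_KEY / passwords
```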
```bash
# ============================================
# Step 4: Build and configure the services
# ============================================
sudo bash scripts/deploy.sh build

# This step will:
# - Compile all Go services into /opt/mpc-system/bin/
# - Create the systemd service files
# - Configure the PostgreSQL database and user
# - Configure Redis
# - Configure the RabbitMQ user
# - Run the database migrations

# ============================================
# Step 5: Start the services
# ============================================
sudo bash scripts/deploy.sh start

# ============================================
# Step 6: Verify the deployment
# ============================================
# Check service status
sudo bash scripts/deploy.sh status

# Health check
curl http://localhost:8080/health

# View logs
sudo bash scripts/deploy.sh logs mpc-account
```
### 4.5 Nginx Reverse Proxy (Port 4000)

To keep the same port mapping as the Docker deployment (4000 → 8080), configure Nginx:

```bash
# Create the MPC proxy configuration
cat > /etc/nginx/conf.d/mpc-proxy.conf << 'EOF'
# MPC-System internal proxy
# Proxies external port 4000 to internal port 8080
server {
    listen 4000;
    server_name localhost;

    location / {
        proxy_pass http://127.0.0.1:8080;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

        # Only allow the backend server to connect
        allow 192.168.1.111;
        deny all;
    }
}
EOF

# Test and reload Nginx
nginx -t && nginx -s reload
```

Alternatively, use iptables port forwarding directly:

```bash
# Port forwarding: 4000 → 8080
iptables -t nat -A PREROUTING -p tcp --dport 4000 -j REDIRECT --to-port 8080
iptables -t nat -A OUTPUT -p tcp --dport 4000 -j REDIRECT --to-port 8080

# Save the rules
iptables-save > /etc/iptables/rules.v4
```
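Note (my addition, not from the original guide): `/etc/iptables/rules.v4` is only restored at boot when the `iptables-persistent` / `netfilter-persistent` package is installed, so persisting the rules might look like this:

```bash
# Assumes Debian/Ubuntu; install the persistence helper and save the current rules
apt-get install -y iptables-persistent
netfilter-persistent save
```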
### 4.6 Service Ports

| Service | Port | Description |
|---------|------|-------------|
| mpc-account | 8080 (→ 4000) | External API entry point, called by mpc-service |
| mpc-session-coordinator | 50051 | Session coordination gRPC (internal) |
| mpc-message-router | 50052 | Message routing gRPC (internal) |
| mpc-server-party-1/2/3 | 8081-8083 | TSS party HTTP (internal) |
| PostgreSQL | 5432 | Database (internal) |
| Redis | 6379 | Cache (internal) |
| RabbitMQ | 5672 | Message queue (internal) |

> **Note**: In production only port 4000 is exposed externally (proxied to 8080 via Nginx or iptables).

### 4.7 MPC-System API Endpoints

Account Service (port 4000/8080) exposes the following APIs:

| Endpoint | Method | Description |
|----------|--------|-------------|
@@ -596,42 +699,107 @@ Account Service (port 4000) exposes the following APIs:
| `/api/v1/account/sign` | POST | Request a signature |
| `/api/v1/account/recovery` | POST | Account recovery |
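As an illustration only (the request schema and authentication header are not documented in this guide and are assumptions), a call from the backend server might look like:

```bash
# Health check through the proxied port
curl http://192.168.1.100:4000/health

# Hypothetical signing request; the JSON fields and the X-API-Key header are assumptions,
# adjust them to the actual Account Service contract
curl -X POST http://192.168.1.100:4000/api/v1/account/sign \
  -H "Content-Type: application/json" \
  -H "X-API-Key: your_very_secure_api_key_at_least_32_characters" \
  -d '{"account_id": "example-account", "message": "deadbeef"}'
```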
### 4.8 Firewall Configuration

```bash
# Only allow the backend server (192.168.1.111) to access MPC-System ports 4000/8080
iptables -A INPUT -p tcp --dport 4000 -s 192.168.1.111 -j ACCEPT
iptables -A INPUT -p tcp --dport 4000 -j DROP
iptables -A INPUT -p tcp --dport 8080 -s 192.168.1.111 -j ACCEPT
iptables -A INPUT -p tcp --dport 8080 -j DROP

# Save the rules
iptables-save > /etc/iptables/rules.v4
```
### 4.9 Operations Commands

```bash
cd /home/ceshi/rwadurian/backend/mpc-system

# Check service status
sudo bash scripts/deploy.sh status
# or use systemctl
systemctl status mpc-account
systemctl status mpc-session-coordinator
systemctl status mpc-message-router
systemctl status mpc-server-party-1
systemctl status mpc-server-party-2
systemctl status mpc-server-party-3

# View logs
sudo bash scripts/deploy.sh logs mpc-account
# or
journalctl -u mpc-account -f
tail -f /opt/mpc-system/logs/mpc-account.log

# Restart a single service
systemctl restart mpc-account

# Restart all MPC services
sudo bash scripts/deploy.sh restart

# Stop all MPC services
sudo bash scripts/deploy.sh stop

# Start all MPC services
sudo bash scripts/deploy.sh start

# Rebuild (after updating the code)
cd /home/ceshi/rwadurian/backend/mpc-system
git pull
sudo bash scripts/deploy.sh build
sudo bash scripts/deploy.sh restart
```
### 4.10 Directory Layout

Directory layout after installation:

```
/opt/mpc-system/
├── bin/                     # Compiled binaries
│   ├── account-service
│   ├── session-coordinator
│   ├── message-router
│   └── server-party
├── config/
│   └── mpc.env              # Environment configuration file
├── data/                    # Data directory
├── logs/                    # Log directory
│   ├── mpc-account.log
│   ├── mpc-account.error.log
│   ├── mpc-session-coordinator.log
│   └── ...
├── migrations/              # Database migration files
│   └── 001_init_schema.up.sql
└── pids/                    # PID file directory
```
### 4.11 Troubleshooting

```bash
# Check whether the services are running
sudo bash scripts/deploy.sh status

# Check whether the ports are listening
ss -tlnp | grep -E '8080|50051|50052'

# Check the PostgreSQL connection
sudo -u postgres psql -c "SELECT 1;"

# Check the Redis connection
redis-cli ping

# Check the RabbitMQ connection
rabbitmqctl status

# Inspect a service's error log
tail -100 /opt/mpc-system/logs/mpc-account.error.log

# Run a service manually for debugging (export the variables from config/mpc.env first if needed)
sudo -u mpc /opt/mpc-system/bin/account-service
```
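Two additional checks that are not in the original list but may help; the credentials are the placeholders from mpc.env and must be replaced with the real values:

```bash
# Verify that the application database user can actually connect (uses the password from mpc.env)
PGPASSWORD='your_secure_postgres_password_here' psql -h localhost -U mpc_user -d mpc_system -c "SELECT 1;"

# Verify that the RabbitMQ user and its permissions were created by the build step
rabbitmqctl list_users
rabbitmqctl list_permissions -p /
```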
## 5. Backend Server Configuration (192.168.1.111)
@@ -1115,8 +1283,11 @@ grep "/api/v1/user" /var/log/nginx/access.log | tail -100
# 192.168.1.100 (Nginx + MPC server) logs
# ========================================

# MPC-System logs
cd /home/ceshi/rwadurian/backend/mpc-system
sudo bash scripts/deploy.sh logs mpc-account
# or
tail -f /opt/mpc-system/logs/mpc-account.log
journalctl -u mpc-account -f
# ========================================
# 192.168.1.111 (backend server) logs
@@ -1139,7 +1310,8 @@ docker compose logs --tail=100 identity-service
# ========================================
# 192.168.1.100 (Nginx + MPC server) health checks
# ========================================
curl http://192.168.1.100:8080/health     # MPC-System (native port)
curl http://192.168.1.100:4000/health     # MPC-System (proxied port)
curl https://rwaapi.szaiai.com/health     # Nginx reverse proxy
# ========================================
@@ -1195,11 +1367,11 @@ docker compose up -d --build identity-service
- Check service-name resolution: `docker compose exec identity-service ping wallet-service`

### Q5: MPC-System connection failures
- Check the MPC-System service status: `sudo bash scripts/deploy.sh status` (on 192.168.1.100)
- Check MPC-System health: `curl http://192.168.1.100:4000/health` or `curl http://192.168.1.100:8080/health`
- Check that the firewall rules allow 192.168.1.111 to reach ports 4000/8080
- Check that the mpc-service environment variable `MPC_SYSTEM_URL` is set to `http://192.168.1.100:4000`
- Inspect the MPC-System logs: `tail -f /opt/mpc-system/logs/mpc-account.log`

### Q6: TSS key generation / signing timeouts
- MPC operations can take a long time; check the timeout configuration
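One way to tell a slow-but-healthy request from a hung one (my suggestion, not from the original guide) is to time it with curl and cap it with an explicit client-side timeout:

```bash
# Measure end-to-end latency of the health endpoint (swap in the signing endpoint to test a real round)
curl -o /dev/null -s -w 'HTTP %{http_code}, total %{time_total}s\n' \
  --max-time 120 http://192.168.1.100:4000/health
```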